summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--drivers/gpu/nvgpu/common/fifo/submit.c8
-rw-r--r--drivers/gpu/nvgpu/common/pmu/pmu.c6
-rw-r--r--drivers/gpu/nvgpu/common/pmu/pmu_fw.c22
-rw-r--r--drivers/gpu/nvgpu/common/pmu/pmu_ipc.c26
-rw-r--r--drivers/gpu/nvgpu/common/pmu/pmu_perfmon.c8
-rw-r--r--drivers/gpu/nvgpu/common/pmu/pmu_pg.c22
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/pmu.h36
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/pmuif/gpmuif_cmn.h2
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/pmuif/gpmuif_perfmon.h2
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/pmuif/gpmuif_pg.h48
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/pmuif/nvgpu_gpmu_cmdif.h40
11 files changed, 110 insertions, 110 deletions
diff --git a/drivers/gpu/nvgpu/common/fifo/submit.c b/drivers/gpu/nvgpu/common/fifo/submit.c
index d034f2d3..7f2f677d 100644
--- a/drivers/gpu/nvgpu/common/fifo/submit.c
+++ b/drivers/gpu/nvgpu/common/fifo/submit.c
@@ -179,7 +179,7 @@ static void nvgpu_submit_append_priv_cmdbuf(struct channel_gk20a *c,
179 trace_gk20a_push_cmdbuf(g->name, 0, cmd->size, 0, 179 trace_gk20a_push_cmdbuf(g->name, 0, cmd->size, 0,
180 (u32 *)cmd->mem->cpu_va + cmd->off); 180 (u32 *)cmd->mem->cpu_va + cmd->off);
181 181
182 c->gpfifo.put = (c->gpfifo.put + 1) & (c->gpfifo.entry_num - 1); 182 c->gpfifo.put = (c->gpfifo.put + 1U) & (c->gpfifo.entry_num - 1U);
183} 183}
184 184
185static int nvgpu_submit_append_gpfifo_user_direct(struct channel_gk20a *c, 185static int nvgpu_submit_append_gpfifo_user_direct(struct channel_gk20a *c,
@@ -286,7 +286,7 @@ static int nvgpu_submit_append_gpfifo(struct channel_gk20a *c,
286 trace_write_pushbuffers(c, num_entries); 286 trace_write_pushbuffers(c, num_entries);
287 287
288 c->gpfifo.put = (c->gpfifo.put + num_entries) & 288 c->gpfifo.put = (c->gpfifo.put + num_entries) &
289 (c->gpfifo.entry_num - 1); 289 (c->gpfifo.entry_num - 1U);
290 290
291 return 0; 291 return 0;
292} 292}
@@ -307,7 +307,7 @@ static int nvgpu_submit_channel_gpfifo(struct channel_gk20a *c,
307 struct channel_gk20a_job *job = NULL; 307 struct channel_gk20a_job *job = NULL;
308 /* we might need two extra gpfifo entries - one for pre fence 308 /* we might need two extra gpfifo entries - one for pre fence
309 * and one for post fence. */ 309 * and one for post fence. */
310 const int extra_entries = 2; 310 const u32 extra_entries = 2U;
311 bool skip_buffer_refcounting = (flags & 311 bool skip_buffer_refcounting = (flags &
312 NVGPU_SUBMIT_FLAGS_SKIP_BUFFER_REFCOUNTING); 312 NVGPU_SUBMIT_FLAGS_SKIP_BUFFER_REFCOUNTING);
313 int err = 0; 313 int err = 0;
@@ -330,7 +330,7 @@ static int nvgpu_submit_channel_gpfifo(struct channel_gk20a *c,
330 * Kernel can insert gpfifo entries before and after user gpfifos. 330 * Kernel can insert gpfifo entries before and after user gpfifos.
331 * So, add extra_entries in user request. Also, HW with fifo size N 331 * So, add extra_entries in user request. Also, HW with fifo size N
332 * can accept only N-1 entries and so the below condition */ 332 * can accept only N-1 entries and so the below condition */
333 if (c->gpfifo.entry_num - 1 < num_entries + extra_entries) { 333 if (c->gpfifo.entry_num - 1U < num_entries + extra_entries) {
334 nvgpu_err(g, "not enough gpfifo space allocated"); 334 nvgpu_err(g, "not enough gpfifo space allocated");
335 return -ENOMEM; 335 return -ENOMEM;
336 } 336 }
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu.c b/drivers/gpu/nvgpu/common/pmu/pmu.c
index d72629b5..86e56d9e 100644
--- a/drivers/gpu/nvgpu/common/pmu/pmu.c
+++ b/drivers/gpu/nvgpu/common/pmu/pmu.c
@@ -512,7 +512,7 @@ int nvgpu_pmu_destroy(struct gk20a *g)
512{ 512{
513 struct nvgpu_pmu *pmu = &g->pmu; 513 struct nvgpu_pmu *pmu = &g->pmu;
514 struct pmu_pg_stats_data pg_stat_data = { 0 }; 514 struct pmu_pg_stats_data pg_stat_data = { 0 };
515 int i; 515 u32 i;
516 516
517 nvgpu_log_fn(g, " "); 517 nvgpu_log_fn(g, " ");
518 518
@@ -539,7 +539,7 @@ int nvgpu_pmu_destroy(struct gk20a *g)
539 pmu->isr_enabled = false; 539 pmu->isr_enabled = false;
540 nvgpu_mutex_release(&pmu->isr_mutex); 540 nvgpu_mutex_release(&pmu->isr_mutex);
541 541
542 for (i = 0; i < PMU_QUEUE_COUNT; i++) { 542 for (i = 0U; i < PMU_QUEUE_COUNT; i++) {
543 nvgpu_flcn_queue_free(pmu->flcn, &pmu->queue[i]); 543 nvgpu_flcn_queue_free(pmu->flcn, &pmu->queue[i]);
544 } 544 }
545 545
@@ -559,7 +559,7 @@ void nvgpu_pmu_surface_describe(struct gk20a *g, struct nvgpu_mem *mem,
559{ 559{
560 fb->address.lo = u64_lo32(mem->gpu_va); 560 fb->address.lo = u64_lo32(mem->gpu_va);
561 fb->address.hi = u64_hi32(mem->gpu_va); 561 fb->address.hi = u64_hi32(mem->gpu_va);
562 fb->params = ((u32)mem->size & 0xFFFFFF); 562 fb->params = ((u32)mem->size & 0xFFFFFFU);
563 fb->params |= (GK20A_PMU_DMAIDX_VIRT << 24); 563 fb->params |= (GK20A_PMU_DMAIDX_VIRT << 24);
564} 564}
565 565
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_fw.c b/drivers/gpu/nvgpu/common/pmu/pmu_fw.c
index 87fd2f2a..bf54e0d6 100644
--- a/drivers/gpu/nvgpu/common/pmu/pmu_fw.c
+++ b/drivers/gpu/nvgpu/common/pmu/pmu_fw.c
@@ -37,12 +37,12 @@
37#define NVGPU_PMU_NS_UCODE_IMAGE "gpmu_ucode.bin" 37#define NVGPU_PMU_NS_UCODE_IMAGE "gpmu_ucode.bin"
38 38
39/* PMU F/W version */ 39/* PMU F/W version */
40#define APP_VERSION_GPU_NEXT 24313845 40#define APP_VERSION_GPU_NEXT 24313845U
41#define APP_VERSION_GV11B 24379482 41#define APP_VERSION_GV11B 24379482U
42#define APP_VERSION_GV10X 23647491 42#define APP_VERSION_GV10X 23647491U
43#define APP_VERSION_GP10X 24076634 43#define APP_VERSION_GP10X 24076634U
44#define APP_VERSION_GP10B 23782727 44#define APP_VERSION_GP10B 23782727U
45#define APP_VERSION_GM20B 20490253 45#define APP_VERSION_GM20B 20490253U
46 46
47/* PMU version specific functions */ 47/* PMU version specific functions */
48static u32 pmu_perfmon_cntr_sz_v2(struct nvgpu_pmu *pmu) 48static u32 pmu_perfmon_cntr_sz_v2(struct nvgpu_pmu *pmu)
@@ -82,7 +82,7 @@ static void set_perfmon_cntr_group_id_v2(struct nvgpu_pmu *pmu, u8 gid)
82 82
83static void set_pmu_cmdline_args_falctracedmabase_v4(struct nvgpu_pmu *pmu) 83static void set_pmu_cmdline_args_falctracedmabase_v4(struct nvgpu_pmu *pmu)
84{ 84{
85 pmu->args_v4.dma_addr.dma_base = ((u32)pmu->trace_buf.gpu_va)/0x100; 85 pmu->args_v4.dma_addr.dma_base = ((u32)pmu->trace_buf.gpu_va)/0x100U;
86 pmu->args_v4.dma_addr.dma_base1 = 0; 86 pmu->args_v4.dma_addr.dma_base1 = 0;
87 pmu->args_v4.dma_addr.dma_offset = 0; 87 pmu->args_v4.dma_addr.dma_offset = 0;
88} 88}
@@ -182,7 +182,7 @@ static void set_pmu_cmdline_args_falctracesize_v3(
182 182
183static void set_pmu_cmdline_args_falctracedmabase_v3(struct nvgpu_pmu *pmu) 183static void set_pmu_cmdline_args_falctracedmabase_v3(struct nvgpu_pmu *pmu)
184{ 184{
185 pmu->args_v3.falc_trace_dma_base = ((u32)pmu->trace_buf.gpu_va)/0x100; 185 pmu->args_v3.falc_trace_dma_base = ((u32)pmu->trace_buf.gpu_va)/0x100U;
186} 186}
187 187
188static void set_pmu_cmdline_args_falctracedmaidx_v3( 188static void set_pmu_cmdline_args_falctracedmaidx_v3(
@@ -882,7 +882,7 @@ static void get_pmu_init_msg_pmu_queue_params_v4(
882 882
883 queue->index = init->queue_index[tmp_id]; 883 queue->index = init->queue_index[tmp_id];
884 queue->size = init->queue_size[tmp_id]; 884 queue->size = init->queue_size[tmp_id];
885 if (tmp_id != 0) { 885 if (tmp_id != 0U) {
886 for (i = 0 ; i < tmp_id; i++) { 886 for (i = 0 ; i < tmp_id; i++) {
887 current_ptr += init->queue_size[i]; 887 current_ptr += init->queue_size[i];
888 } 888 }
@@ -911,7 +911,7 @@ static void get_pmu_init_msg_pmu_queue_params_v5(
911 911
912 queue->index = init->queue_index[tmp_id]; 912 queue->index = init->queue_index[tmp_id];
913 queue->size = init->queue_size[tmp_id]; 913 queue->size = init->queue_size[tmp_id];
914 if (tmp_id != 0) { 914 if (tmp_id != 0U) {
915 for (i = 0 ; i < tmp_id; i++) { 915 for (i = 0 ; i < tmp_id; i++) {
916 current_ptr += init->queue_size[i]; 916 current_ptr += init->queue_size[i];
917 } 917 }
@@ -940,7 +940,7 @@ static void get_pmu_init_msg_pmu_queue_params_v3(
940 } 940 }
941 queue->index = init->queue_index[tmp_id]; 941 queue->index = init->queue_index[tmp_id];
942 queue->size = init->queue_size[tmp_id]; 942 queue->size = init->queue_size[tmp_id];
943 if (tmp_id != 0) { 943 if (tmp_id != 0U) {
944 for (i = 0 ; i < tmp_id; i++) { 944 for (i = 0 ; i < tmp_id; i++) {
945 current_ptr += init->queue_size[i]; 945 current_ptr += init->queue_size[i];
946 } 946 }
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c b/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c
index 39be07cc..68654a70 100644
--- a/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c
+++ b/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c
@@ -184,9 +184,9 @@ static bool pmu_validate_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd,
184 goto invalid_cmd; 184 goto invalid_cmd;
185 } 185 }
186 186
187 if ((payload->in.buf != NULL && payload->in.size == 0) || 187 if ((payload->in.buf != NULL && payload->in.size == 0U) ||
188 (payload->out.buf != NULL && payload->out.size == 0) || 188 (payload->out.buf != NULL && payload->out.size == 0U) ||
189 (payload->rpc.prpc != NULL && payload->rpc.size_rpc == 0)) { 189 (payload->rpc.prpc != NULL && payload->rpc.size_rpc == 0U)) {
190 goto invalid_cmd; 190 goto invalid_cmd;
191 } 191 }
192 192
@@ -207,8 +207,8 @@ static bool pmu_validate_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd,
207 } 207 }
208 208
209 209
210 if ((payload->in.offset != 0 && payload->in.buf == NULL) || 210 if ((payload->in.offset != 0U && payload->in.buf == NULL) ||
211 (payload->out.offset != 0 && payload->out.buf == NULL)) { 211 (payload->out.offset != 0U && payload->out.buf == NULL)) {
212 goto invalid_cmd; 212 goto invalid_cmd;
213 } 213 }
214 214
@@ -316,7 +316,7 @@ static int pmu_cmd_payload_extract(struct gk20a *g, struct pmu_cmd *cmd,
316 seq->out_payload = payload->out.buf; 316 seq->out_payload = payload->out.buf;
317 } 317 }
318 318
319 if (payload && payload->in.offset != 0) { 319 if (payload && payload->in.offset != 0U) {
320 pv->set_pmu_allocation_ptr(pmu, &in, 320 pv->set_pmu_allocation_ptr(pmu, &in,
321 ((u8 *)&cmd->cmd + payload->in.offset)); 321 ((u8 *)&cmd->cmd + payload->in.offset));
322 322
@@ -335,7 +335,7 @@ static int pmu_cmd_payload_extract(struct gk20a *g, struct pmu_cmd *cmd,
335 goto clean_up; 335 goto clean_up;
336 } 336 }
337 337
338 if (payload->in.fb_size != 0x0) { 338 if (payload->in.fb_size != 0x0U) {
339 seq->in_mem = nvgpu_kzalloc(g, 339 seq->in_mem = nvgpu_kzalloc(g,
340 sizeof(struct nvgpu_mem)); 340 sizeof(struct nvgpu_mem));
341 if (!seq->in_mem) { 341 if (!seq->in_mem) {
@@ -365,7 +365,7 @@ static int pmu_cmd_payload_extract(struct gk20a *g, struct pmu_cmd *cmd,
365 pv->pmu_allocation_get_dmem_offset(pmu, in)); 365 pv->pmu_allocation_get_dmem_offset(pmu, in));
366 } 366 }
367 367
368 if (payload && payload->out.offset != 0) { 368 if (payload && payload->out.offset != 0U) {
369 pv->set_pmu_allocation_ptr(pmu, &out, 369 pv->set_pmu_allocation_ptr(pmu, &out,
370 ((u8 *)&cmd->cmd + payload->out.offset)); 370 ((u8 *)&cmd->cmd + payload->out.offset));
371 pv->pmu_allocation_set_dmem_size(pmu, out, 371 pv->pmu_allocation_set_dmem_size(pmu, out,
@@ -381,7 +381,7 @@ static int pmu_cmd_payload_extract(struct gk20a *g, struct pmu_cmd *cmd,
381 goto clean_up; 381 goto clean_up;
382 } 382 }
383 383
384 if (payload->out.fb_size != 0x0) { 384 if (payload->out.fb_size != 0x0U) {
385 seq->out_mem = nvgpu_kzalloc(g, 385 seq->out_mem = nvgpu_kzalloc(g,
386 sizeof(struct nvgpu_mem)); 386 sizeof(struct nvgpu_mem));
387 if (!seq->out_mem) { 387 if (!seq->out_mem) {
@@ -534,7 +534,7 @@ static int pmu_response_handle(struct nvgpu_pmu *pmu,
534 } 534 }
535 } 535 }
536 if (pv->pmu_allocation_get_dmem_size(pmu, 536 if (pv->pmu_allocation_get_dmem_size(pmu,
537 pv->get_pmu_seq_out_a_ptr(seq)) != 0) { 537 pv->get_pmu_seq_out_a_ptr(seq)) != 0U) {
538 nvgpu_flcn_copy_from_dmem(pmu->flcn, 538 nvgpu_flcn_copy_from_dmem(pmu->flcn,
539 pv->pmu_allocation_get_dmem_offset(pmu, 539 pv->pmu_allocation_get_dmem_offset(pmu,
540 pv->get_pmu_seq_out_a_ptr(seq)), 540 pv->get_pmu_seq_out_a_ptr(seq)),
@@ -546,13 +546,13 @@ static int pmu_response_handle(struct nvgpu_pmu *pmu,
546 seq->callback = NULL; 546 seq->callback = NULL;
547 } 547 }
548 if (pv->pmu_allocation_get_dmem_size(pmu, 548 if (pv->pmu_allocation_get_dmem_size(pmu,
549 pv->get_pmu_seq_in_a_ptr(seq)) != 0) { 549 pv->get_pmu_seq_in_a_ptr(seq)) != 0U) {
550 nvgpu_free(&pmu->dmem, 550 nvgpu_free(&pmu->dmem,
551 pv->pmu_allocation_get_dmem_offset(pmu, 551 pv->pmu_allocation_get_dmem_offset(pmu,
552 pv->get_pmu_seq_in_a_ptr(seq))); 552 pv->get_pmu_seq_in_a_ptr(seq)));
553 } 553 }
554 if (pv->pmu_allocation_get_dmem_size(pmu, 554 if (pv->pmu_allocation_get_dmem_size(pmu,
555 pv->get_pmu_seq_out_a_ptr(seq)) != 0) { 555 pv->get_pmu_seq_out_a_ptr(seq)) != 0U) {
556 nvgpu_free(&pmu->dmem, 556 nvgpu_free(&pmu->dmem,
557 pv->pmu_allocation_get_dmem_offset(pmu, 557 pv->pmu_allocation_get_dmem_offset(pmu,
558 pv->get_pmu_seq_out_a_ptr(seq))); 558 pv->get_pmu_seq_out_a_ptr(seq)));
@@ -748,7 +748,7 @@ int pmu_wait_message_cond(struct nvgpu_pmu *pmu, u32 timeout_ms,
748 gk20a_pmu_isr(g); 748 gk20a_pmu_isr(g);
749 } 749 }
750 750
751 nvgpu_usleep_range(delay, delay * 2); 751 nvgpu_usleep_range(delay, delay * 2U);
752 delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX); 752 delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
753 } while (!nvgpu_timeout_expired(&timeout)); 753 } while (!nvgpu_timeout_expired(&timeout));
754 754
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_perfmon.c b/drivers/gpu/nvgpu/common/pmu/pmu_perfmon.c
index 73893f2c..5d736591 100644
--- a/drivers/gpu/nvgpu/common/pmu/pmu_perfmon.c
+++ b/drivers/gpu/nvgpu/common/pmu/pmu_perfmon.c
@@ -77,7 +77,7 @@ int nvgpu_pmu_init_perfmon(struct nvgpu_pmu *pmu)
77 77
78 if (!pmu->sample_buffer) { 78 if (!pmu->sample_buffer) {
79 pmu->sample_buffer = nvgpu_alloc(&pmu->dmem, 79 pmu->sample_buffer = nvgpu_alloc(&pmu->dmem,
80 2 * sizeof(u16)); 80 2U * sizeof(u16));
81 } 81 }
82 if (!pmu->sample_buffer) { 82 if (!pmu->sample_buffer) {
83 nvgpu_err(g, "failed to allocate perfmon sample buffer"); 83 nvgpu_err(g, "failed to allocate perfmon sample buffer");
@@ -215,7 +215,7 @@ int nvgpu_pmu_load_norm(struct gk20a *g, u32 *load)
215int nvgpu_pmu_load_update(struct gk20a *g) 215int nvgpu_pmu_load_update(struct gk20a *g)
216{ 216{
217 struct nvgpu_pmu *pmu = &g->pmu; 217 struct nvgpu_pmu *pmu = &g->pmu;
218 u16 load = 0; 218 u32 load = 0;
219 219
220 if (!pmu->perfmon_ready) { 220 if (!pmu->perfmon_ready) {
221 pmu->load_shadow = 0; 221 pmu->load_shadow = 0;
@@ -231,8 +231,8 @@ int nvgpu_pmu_load_update(struct gk20a *g)
231 (u8 *)&load, 2 * 1, 0); 231 (u8 *)&load, 2 * 1, 0);
232 } 232 }
233 233
234 pmu->load_shadow = load / 10; 234 pmu->load_shadow = load / 10U;
235 pmu->load_avg = (((9*pmu->load_avg) + pmu->load_shadow) / 10); 235 pmu->load_avg = (((9U*pmu->load_avg) + pmu->load_shadow) / 10U);
236 236
237 return 0; 237 return 0;
238} 238}
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_pg.c b/drivers/gpu/nvgpu/common/pmu/pmu_pg.c
index 4978708c..76ed0621 100644
--- a/drivers/gpu/nvgpu/common/pmu/pmu_pg.c
+++ b/drivers/gpu/nvgpu/common/pmu/pmu_pg.c
@@ -34,17 +34,17 @@
34 * ON => OFF is always synchronized 34 * ON => OFF is always synchronized
35 */ 35 */
36/* elpg is off */ 36/* elpg is off */
37#define PMU_ELPG_STAT_OFF 0 37#define PMU_ELPG_STAT_OFF 0U
38/* elpg is on */ 38/* elpg is on */
39#define PMU_ELPG_STAT_ON 1 39#define PMU_ELPG_STAT_ON 1U
40/* elpg is off, ALLOW cmd has been sent, wait for ack */ 40/* elpg is off, ALLOW cmd has been sent, wait for ack */
41#define PMU_ELPG_STAT_ON_PENDING 2 41#define PMU_ELPG_STAT_ON_PENDING 2U
42/* elpg is on, DISALLOW cmd has been sent, wait for ack */ 42/* elpg is on, DISALLOW cmd has been sent, wait for ack */
43#define PMU_ELPG_STAT_OFF_PENDING 3 43#define PMU_ELPG_STAT_OFF_PENDING 3U
44/* elpg is off, caller has requested on, but ALLOW 44/* elpg is off, caller has requested on, but ALLOW
45 * cmd hasn't been sent due to ENABLE_ALLOW delay 45 * cmd hasn't been sent due to ENABLE_ALLOW delay
46 */ 46 */
47#define PMU_ELPG_STAT_OFF_ON_PENDING 4 47#define PMU_ELPG_STAT_OFF_ON_PENDING 4U
48 48
49#define PMU_PGENG_GR_BUFFER_IDX_INIT (0) 49#define PMU_PGENG_GR_BUFFER_IDX_INIT (0)
50#define PMU_PGENG_GR_BUFFER_IDX_ZBC (1) 50#define PMU_PGENG_GR_BUFFER_IDX_ZBC (1)
@@ -58,7 +58,7 @@ static void pmu_handle_pg_elpg_msg(struct gk20a *g, struct pmu_msg *msg,
58 58
59 nvgpu_log_fn(g, " "); 59 nvgpu_log_fn(g, " ");
60 60
61 if (status != 0) { 61 if (status != 0U) {
62 nvgpu_err(g, "ELPG cmd aborted"); 62 nvgpu_err(g, "ELPG cmd aborted");
63 /* TBD: disable ELPG */ 63 /* TBD: disable ELPG */
64 return; 64 return;
@@ -174,7 +174,7 @@ static int pmu_enable_elpg_locked(struct gk20a *g, u32 pg_engine_id)
174 status = nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, 174 status = nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL,
175 PMU_COMMAND_QUEUE_HPQ, pmu_handle_pg_elpg_msg, 175 PMU_COMMAND_QUEUE_HPQ, pmu_handle_pg_elpg_msg,
176 pmu, &seq, ~0); 176 pmu, &seq, ~0);
177 WARN_ON(status != 0); 177 WARN_ON(status != 0U);
178 178
179 nvgpu_log_fn(g, "done"); 179 nvgpu_log_fn(g, "done");
180 return 0; 180 return 0;
@@ -368,7 +368,7 @@ static void pmu_handle_pg_stat_msg(struct gk20a *g, struct pmu_msg *msg,
368 368
369 nvgpu_log_fn(g, " "); 369 nvgpu_log_fn(g, " ");
370 370
371 if (status != 0) { 371 if (status != 0U) {
372 nvgpu_err(g, "ELPG cmd aborted"); 372 nvgpu_err(g, "ELPG cmd aborted");
373 /* TBD: disable ELPG */ 373 /* TBD: disable ELPG */
374 return; 374 return;
@@ -507,7 +507,7 @@ static void pmu_handle_pg_buf_config_msg(struct gk20a *g, struct pmu_msg *msg,
507 507
508 nvgpu_pmu_dbg(g, 508 nvgpu_pmu_dbg(g,
509 "reply PMU_PG_CMD_ID_ENG_BUF_LOAD PMU_PGENG_GR_BUFFER_IDX_FECS"); 509 "reply PMU_PG_CMD_ID_ENG_BUF_LOAD PMU_PGENG_GR_BUFFER_IDX_FECS");
510 if (status != 0) { 510 if (status != 0U) {
511 nvgpu_err(g, "PGENG cmd aborted"); 511 nvgpu_err(g, "PGENG cmd aborted");
512 /* TBD: disable ELPG */ 512 /* TBD: disable ELPG */
513 return; 513 return;
@@ -549,7 +549,7 @@ int nvgpu_pmu_init_bind_fecs(struct gk20a *g)
549 g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_base(&cmd.cmd.pg, 549 g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_base(&cmd.cmd.pg,
550 u64_lo32(pmu->pg_buf.gpu_va)); 550 u64_lo32(pmu->pg_buf.gpu_va));
551 g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_offset(&cmd.cmd.pg, 551 g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_offset(&cmd.cmd.pg,
552 (u8)(pmu->pg_buf.gpu_va & 0xFF)); 552 (u8)(pmu->pg_buf.gpu_va & 0xFFU));
553 g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_idx(&cmd.cmd.pg, 553 g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_idx(&cmd.cmd.pg,
554 PMU_DMAIDX_VIRT); 554 PMU_DMAIDX_VIRT);
555 555
@@ -590,7 +590,7 @@ void nvgpu_pmu_setup_hw_load_zbc(struct gk20a *g)
590 g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_base(&cmd.cmd.pg, 590 g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_base(&cmd.cmd.pg,
591 u64_lo32(pmu->seq_buf.gpu_va)); 591 u64_lo32(pmu->seq_buf.gpu_va));
592 g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_offset(&cmd.cmd.pg, 592 g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_offset(&cmd.cmd.pg,
593 (u8)(pmu->seq_buf.gpu_va & 0xFF)); 593 (u8)(pmu->seq_buf.gpu_va & 0xFFU));
594 g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_idx(&cmd.cmd.pg, 594 g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_idx(&cmd.cmd.pg,
595 PMU_DMAIDX_VIRT); 595 PMU_DMAIDX_VIRT);
596 596
diff --git a/drivers/gpu/nvgpu/include/nvgpu/pmu.h b/drivers/gpu/nvgpu/include/nvgpu/pmu.h
index 28374b9d..1240530f 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/pmu.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/pmu.h
@@ -84,21 +84,21 @@
84#define PMU_FALCON_REG_SIZE (32) 84#define PMU_FALCON_REG_SIZE (32)
85 85
86/* Choices for pmu_state */ 86/* Choices for pmu_state */
87#define PMU_STATE_OFF 0 /* PMU is off */ 87#define PMU_STATE_OFF 0U /* PMU is off */
88#define PMU_STATE_STARTING 1 /* PMU is on, but not booted */ 88#define PMU_STATE_STARTING 1U /* PMU is on, but not booted */
89#define PMU_STATE_INIT_RECEIVED 2 /* PMU init message received */ 89#define PMU_STATE_INIT_RECEIVED 2U /* PMU init message received */
90#define PMU_STATE_ELPG_BOOTING 3 /* PMU is booting */ 90#define PMU_STATE_ELPG_BOOTING 3U /* PMU is booting */
91#define PMU_STATE_ELPG_BOOTED 4 /* ELPG is initialized */ 91#define PMU_STATE_ELPG_BOOTED 4U /* ELPG is initialized */
92#define PMU_STATE_LOADING_PG_BUF 5 /* Loading PG buf */ 92#define PMU_STATE_LOADING_PG_BUF 5U /* Loading PG buf */
93#define PMU_STATE_LOADING_ZBC 6 /* Loading ZBC buf */ 93#define PMU_STATE_LOADING_ZBC 6U /* Loading ZBC buf */
94#define PMU_STATE_STARTED 7 /* Fully initialized */ 94#define PMU_STATE_STARTED 7U /* Fully initialized */
95#define PMU_STATE_EXIT 8 /* Exit PMU state machine */ 95#define PMU_STATE_EXIT 8U /* Exit PMU state machine */
96 96
97#define GK20A_PMU_UCODE_NB_MAX_OVERLAY 32 97#define GK20A_PMU_UCODE_NB_MAX_OVERLAY 32U
98#define GK20A_PMU_UCODE_NB_MAX_DATE_LENGTH 64 98#define GK20A_PMU_UCODE_NB_MAX_DATE_LENGTH 64U
99 99
100#define PMU_MAX_NUM_SEQUENCES (256) 100#define PMU_MAX_NUM_SEQUENCES (256U)
101#define PMU_SEQ_BIT_SHIFT (5) 101#define PMU_SEQ_BIT_SHIFT (5U)
102#define PMU_SEQ_TBL_SIZE \ 102#define PMU_SEQ_TBL_SIZE \
103 (PMU_MAX_NUM_SEQUENCES >> PMU_SEQ_BIT_SHIFT) 103 (PMU_MAX_NUM_SEQUENCES >> PMU_SEQ_BIT_SHIFT)
104 104
@@ -132,8 +132,8 @@ enum {
132#define PMU_PG_LPWR_FEATURE_RPPG 0x0 132#define PMU_PG_LPWR_FEATURE_RPPG 0x0
133#define PMU_PG_LPWR_FEATURE_MSCG 0x1 133#define PMU_PG_LPWR_FEATURE_MSCG 0x1
134 134
135#define PMU_MSCG_DISABLED 0 135#define PMU_MSCG_DISABLED 0U
136#define PMU_MSCG_ENABLED 1 136#define PMU_MSCG_ENABLED 1U
137 137
138/* Default Sampling Period of AELPG */ 138/* Default Sampling Period of AELPG */
139#define APCTRL_SAMPLING_PERIOD_PG_DEFAULT_US (1000000) 139#define APCTRL_SAMPLING_PERIOD_PG_DEFAULT_US (1000000)
@@ -350,7 +350,7 @@ struct nvgpu_pmu {
350 u32 mscg_stat; 350 u32 mscg_stat;
351 u32 mscg_transition_state; 351 u32 mscg_transition_state;
352 352
353 int pmu_state; 353 u32 pmu_state;
354 354
355#define PMU_ELPG_ENABLE_ALLOW_DELAY_MSEC 1 /* msec */ 355#define PMU_ELPG_ENABLE_ALLOW_DELAY_MSEC 1 /* msec */
356 struct nvgpu_pg_init pg_init; 356 struct nvgpu_pg_init pg_init;
diff --git a/drivers/gpu/nvgpu/include/nvgpu/pmuif/gpmuif_cmn.h b/drivers/gpu/nvgpu/include/nvgpu/pmuif/gpmuif_cmn.h
index 68df80b4..e3317805 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/pmuif/gpmuif_cmn.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/pmuif/gpmuif_cmn.h
@@ -32,7 +32,7 @@
32#define PMU_COMMAND_QUEUE_LPQ 1U 32#define PMU_COMMAND_QUEUE_LPQ 1U
33/* write by pmu, read by sw, accessed by interrupt handler, no lock */ 33/* write by pmu, read by sw, accessed by interrupt handler, no lock */
34#define PMU_MESSAGE_QUEUE 4U 34#define PMU_MESSAGE_QUEUE 4U
35#define PMU_QUEUE_COUNT 5 35#define PMU_QUEUE_COUNT 5U
36 36
37#define PMU_IS_COMMAND_QUEUE(id) \ 37#define PMU_IS_COMMAND_QUEUE(id) \
38 ((id) < PMU_MESSAGE_QUEUE) 38 ((id) < PMU_MESSAGE_QUEUE)
diff --git a/drivers/gpu/nvgpu/include/nvgpu/pmuif/gpmuif_perfmon.h b/drivers/gpu/nvgpu/include/nvgpu/pmuif/gpmuif_perfmon.h
index 91e89365..ba6e9ec8 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/pmuif/gpmuif_perfmon.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/pmuif/gpmuif_perfmon.h
@@ -32,7 +32,7 @@
32#define PMU_PERFMON_FLAG_ENABLE_DECREASE (0x00000002) 32#define PMU_PERFMON_FLAG_ENABLE_DECREASE (0x00000002)
33#define PMU_PERFMON_FLAG_CLEAR_PREV (0x00000004) 33#define PMU_PERFMON_FLAG_CLEAR_PREV (0x00000004)
34 34
35#define NV_PMU_PERFMON_MAX_COUNTERS 10 35#define NV_PMU_PERFMON_MAX_COUNTERS 10U
36 36
37enum pmu_perfmon_cmd_start_fields { 37enum pmu_perfmon_cmd_start_fields {
38 COUNTER_ALLOC 38 COUNTER_ALLOC
diff --git a/drivers/gpu/nvgpu/include/nvgpu/pmuif/gpmuif_pg.h b/drivers/gpu/nvgpu/include/nvgpu/pmuif/gpmuif_pg.h
index 1ba9963c..c156a6c0 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/pmuif/gpmuif_pg.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/pmuif/gpmuif_pg.h
@@ -28,9 +28,9 @@
28/*PG defines*/ 28/*PG defines*/
29 29
30/* Identifier for each PG */ 30/* Identifier for each PG */
31#define PMU_PG_ELPG_ENGINE_ID_GRAPHICS (0x00000000) 31#define PMU_PG_ELPG_ENGINE_ID_GRAPHICS (0x00000000U)
32#define PMU_PG_ELPG_ENGINE_ID_MS (0x00000004) 32#define PMU_PG_ELPG_ENGINE_ID_MS (0x00000004U)
33#define PMU_PG_ELPG_ENGINE_ID_INVALID_ENGINE (0x00000005) 33#define PMU_PG_ELPG_ENGINE_ID_INVALID_ENGINE (0x00000005U)
34#define PMU_PG_ELPG_ENGINE_MAX PMU_PG_ELPG_ENGINE_ID_INVALID_ENGINE 34#define PMU_PG_ELPG_ENGINE_MAX PMU_PG_ELPG_ENGINE_ID_INVALID_ENGINE
35 35
36/* PG message */ 36/* PG message */
@@ -173,23 +173,23 @@ enum {
173 SLOWDOWN_FACTOR_FPDIV_BYMAX, 173 SLOWDOWN_FACTOR_FPDIV_BYMAX,
174}; 174};
175 175
176#define PMU_PG_PARAM_CMD_GR_INIT_PARAM 0x0 176#define PMU_PG_PARAM_CMD_GR_INIT_PARAM 0x0U
177#define PMU_PG_PARAM_CMD_MS_INIT_PARAM 0x01 177#define PMU_PG_PARAM_CMD_MS_INIT_PARAM 0x01U
178#define PMU_PG_PARAM_CMD_MCLK_CHANGE 0x04 178#define PMU_PG_PARAM_CMD_MCLK_CHANGE 0x04U
179#define PMU_PG_PARAM_CMD_POST_INIT 0x06 179#define PMU_PG_PARAM_CMD_POST_INIT 0x06U
180#define PMU_PG_PARAM_CMD_SUB_FEATURE_MASK_UPDATE 0x07 180#define PMU_PG_PARAM_CMD_SUB_FEATURE_MASK_UPDATE 0x07U
181 181
182#define NVGPU_PMU_GR_FEATURE_MASK_SDIV_SLOWDOWN (1 << 0) 182#define NVGPU_PMU_GR_FEATURE_MASK_SDIV_SLOWDOWN BIT32(0)
183#define NVGPU_PMU_GR_FEATURE_MASK_POWER_GATING (1 << 2) 183#define NVGPU_PMU_GR_FEATURE_MASK_POWER_GATING BIT32(2)
184#define NVGPU_PMU_GR_FEATURE_MASK_RPPG (1 << 3) 184#define NVGPU_PMU_GR_FEATURE_MASK_RPPG BIT32(3)
185#define NVGPU_PMU_GR_FEATURE_MASK_PRIV_RING (1 << 5) 185#define NVGPU_PMU_GR_FEATURE_MASK_PRIV_RING BIT32(5)
186#define NVGPU_PMU_GR_FEATURE_MASK_UNBIND (1 << 6) 186#define NVGPU_PMU_GR_FEATURE_MASK_UNBIND BIT32(6)
187#define NVGPU_PMU_GR_FEATURE_MASK_SAVE_GLOBAL_STATE (1 << 7) 187#define NVGPU_PMU_GR_FEATURE_MASK_SAVE_GLOBAL_STATE BIT32(7)
188#define NVGPU_PMU_GR_FEATURE_MASK_RESET_ENTRY (1 << 8) 188#define NVGPU_PMU_GR_FEATURE_MASK_RESET_ENTRY BIT32(8)
189#define NVGPU_PMU_GR_FEATURE_MASK_HW_SEQUENCE (1 << 9) 189#define NVGPU_PMU_GR_FEATURE_MASK_HW_SEQUENCE BIT32(9)
190#define NVGPU_PMU_GR_FEATURE_MASK_ELPG_SRAM (1 << 10) 190#define NVGPU_PMU_GR_FEATURE_MASK_ELPG_SRAM BIT32(10)
191#define NVGPU_PMU_GR_FEATURE_MASK_ELPG_LOGIC (1 << 11) 191#define NVGPU_PMU_GR_FEATURE_MASK_ELPG_LOGIC BIT32(11)
192#define NVGPU_PMU_GR_FEATURE_MASK_ELPG_L2RPPG (1 << 12) 192#define NVGPU_PMU_GR_FEATURE_MASK_ELPG_L2RPPG BIT32(12)
193 193
194#define NVGPU_PMU_GR_FEATURE_MASK_ALL \ 194#define NVGPU_PMU_GR_FEATURE_MASK_ALL \
195 ( \ 195 ( \
@@ -206,10 +206,10 @@ enum {
206 NVGPU_PMU_GR_FEATURE_MASK_ELPG_L2RPPG \ 206 NVGPU_PMU_GR_FEATURE_MASK_ELPG_L2RPPG \
207 ) 207 )
208 208
209#define NVGPU_PMU_MS_FEATURE_MASK_CLOCK_GATING (1 << 0) 209#define NVGPU_PMU_MS_FEATURE_MASK_CLOCK_GATING BIT32(0)
210#define NVGPU_PMU_MS_FEATURE_MASK_SW_ASR (1 << 1) 210#define NVGPU_PMU_MS_FEATURE_MASK_SW_ASR BIT32(1)
211#define NVGPU_PMU_MS_FEATURE_MASK_RPPG (1 << 8) 211#define NVGPU_PMU_MS_FEATURE_MASK_RPPG BIT32(8)
212#define NVGPU_PMU_MS_FEATURE_MASK_FB_TRAINING (1 << 5) 212#define NVGPU_PMU_MS_FEATURE_MASK_FB_TRAINING BIT32(5)
213 213
214#define NVGPU_PMU_MS_FEATURE_MASK_ALL \ 214#define NVGPU_PMU_MS_FEATURE_MASK_ALL \
215 ( \ 215 ( \
diff --git a/drivers/gpu/nvgpu/include/nvgpu/pmuif/nvgpu_gpmu_cmdif.h b/drivers/gpu/nvgpu/include/nvgpu/pmuif/nvgpu_gpmu_cmdif.h
index 1a05ec29..06486006 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/pmuif/nvgpu_gpmu_cmdif.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/pmuif/nvgpu_gpmu_cmdif.h
@@ -59,7 +59,7 @@ struct nv_pmu_rpc_cmd {
59 u32 rpc_dmem_ptr; 59 u32 rpc_dmem_ptr;
60}; 60};
61 61
62#define NV_PMU_RPC_CMD_ID 0x80 62#define NV_PMU_RPC_CMD_ID 0x80U
63 63
64/* Message carrying the result of the RPC execution */ 64/* Message carrying the result of the RPC execution */
65struct nv_pmu_rpc_msg { 65struct nv_pmu_rpc_msg {
@@ -79,7 +79,7 @@ struct nv_pmu_rpc_msg {
79 u32 rpc_dmem_ptr; 79 u32 rpc_dmem_ptr;
80}; 80};
81 81
82#define NV_PMU_RPC_MSG_ID 0x80 82#define NV_PMU_RPC_MSG_ID 0x80U
83 83
84struct pmu_cmd { 84struct pmu_cmd {
85 struct pmu_hdr hdr; 85 struct pmu_hdr hdr;
@@ -116,26 +116,26 @@ struct pmu_msg {
116 } msg; 116 } msg;
117}; 117};
118 118
119#define PMU_UNIT_REWIND (0x00) 119#define PMU_UNIT_REWIND (0x00U)
120#define PMU_UNIT_PG (0x03) 120#define PMU_UNIT_PG (0x03U)
121#define PMU_UNIT_INIT (0x07) 121#define PMU_UNIT_INIT (0x07U)
122#define PMU_UNIT_ACR (0x0A) 122#define PMU_UNIT_ACR (0x0AU)
123#define PMU_UNIT_PERFMON_T18X (0x11) 123#define PMU_UNIT_PERFMON_T18X (0x11U)
124#define PMU_UNIT_PERFMON (0x12) 124#define PMU_UNIT_PERFMON (0x12U)
125#define PMU_UNIT_PERF (0x13) 125#define PMU_UNIT_PERF (0x13U)
126#define PMU_UNIT_RC (0x1F) 126#define PMU_UNIT_RC (0x1FU)
127#define PMU_UNIT_FECS_MEM_OVERRIDE (0x1E) 127#define PMU_UNIT_FECS_MEM_OVERRIDE (0x1EU)
128#define PMU_UNIT_CLK (0x0D) 128#define PMU_UNIT_CLK (0x0DU)
129#define PMU_UNIT_THERM (0x14) 129#define PMU_UNIT_THERM (0x14U)
130#define PMU_UNIT_PMGR (0x18) 130#define PMU_UNIT_PMGR (0x18U)
131#define PMU_UNIT_VOLT (0x0E) 131#define PMU_UNIT_VOLT (0x0EU)
132 132
133#define PMU_UNIT_END (0x23) 133#define PMU_UNIT_END (0x23U)
134#define PMU_UNIT_INVALID (0xFF) 134#define PMU_UNIT_INVALID (0xFFU)
135 135
136#define PMU_UNIT_TEST_START (0xFE) 136#define PMU_UNIT_TEST_START (0xFEU)
137#define PMU_UNIT_END_SIM (0xFF) 137#define PMU_UNIT_END_SIM (0xFFU)
138#define PMU_UNIT_TEST_END (0xFF) 138#define PMU_UNIT_TEST_END (0xFFU)
139 139
140#define PMU_UNIT_ID_IS_VALID(id) \ 140#define PMU_UNIT_ID_IS_VALID(id) \
141 (((id) < PMU_UNIT_END) || ((id) >= PMU_UNIT_TEST_START)) 141 (((id) < PMU_UNIT_END) || ((id) >= PMU_UNIT_TEST_START))