path: root/drivers/gpu/nvgpu/common/pmu
author     Amulya <Amurthyreddy@nvidia.com>    2018-08-28 03:04:55 -0400
committer  mobile promotions <svcmobile_promotions@nvidia.com>    2018-09-19 06:24:12 -0400
commit     941ac9a9d07bedb4062fd0c4d32eb2ef80a42359 (patch)
tree       c53622d96a4c2e7c18693ecf4059d7e403cd7808 /drivers/gpu/nvgpu/common/pmu
parent     2805f03aa0496502b64ff760f667bfe9d8a27928 (diff)
nvgpu: common: MISRA 10.1 boolean fixes
Fix violations where a variable of type non-boolean is used as a
boolean in gpu/nvgpu/common.

JIRA NVGPU-646

Change-Id: I9773d863b715f83ae1772b75d5373f77244bc8ca
Signed-off-by: Amulya <Amurthyreddy@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1807132
GVS: Gerrit_Virtual_Submit
Tested-by: Amulya Murthyreddy <amurthyreddy@nvidia.com>
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/common/pmu')
-rw-r--r--    drivers/gpu/nvgpu/common/pmu/pmu.c            8
-rw-r--r--    drivers/gpu/nvgpu/common/pmu/pmu_fw.c         4
-rw-r--r--    drivers/gpu/nvgpu/common/pmu/pmu_ipc.c       44
-rw-r--r--    drivers/gpu/nvgpu/common/pmu/pmu_perfmon.c   10
-rw-r--r--    drivers/gpu/nvgpu/common/pmu/pmu_pg.c        23
5 files changed, 45 insertions, 44 deletions
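The change applies one mechanical pattern throughout: controlling expressions that relied on the implicit truthiness of pointers or unsigned integers are rewritten as explicit comparisons against NULL or 0U, and WARN_ON(1) becomes WARN_ON(true), so that every condition is essentially boolean as MISRA C:2012 Rule 10.1 requires. A minimal illustrative sketch of the before/after pattern is shown below; check_args() and its buf/count parameters are hypothetical names used only for this example, not code from the driver.

#include <stddef.h>

/* Hypothetical helper illustrating the MISRA 10.1 rewrite pattern. */
static int check_args(void *buf, unsigned int count)
{
    /*
     * Before: non-boolean operands used directly as booleans:
     *     if (!buf || !count)
     * After: explicit comparisons keep the controlling expression boolean.
     */
    if ((buf == NULL) || (count == 0U)) {
        return -1;      /* reject invalid arguments */
    }

    return 0;
}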
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu.c b/drivers/gpu/nvgpu/common/pmu/pmu.c
index 0395e463..6d1d5f00 100644
--- a/drivers/gpu/nvgpu/common/pmu/pmu.c
+++ b/drivers/gpu/nvgpu/common/pmu/pmu.c
@@ -170,8 +170,8 @@ void nvgpu_kill_task_pg_init(struct gk20a *g)
                 break;
             }
             nvgpu_udelay(2);
-        } while (!nvgpu_timeout_expired_msg(&timeout,
-            "timeout - waiting PMU state machine thread stop"));
+        } while (nvgpu_timeout_expired_msg(&timeout,
+            "timeout - waiting PMU state machine thread stop") == 0);
     }
 }
 
@@ -214,7 +214,7 @@ static int nvgpu_init_pmu_setup_sw(struct gk20a *g)
     pmu->mutex_cnt = g->ops.pmu.pmu_mutex_size();
     pmu->mutex = nvgpu_kzalloc(g, pmu->mutex_cnt *
         sizeof(struct pmu_mutex));
-    if (!pmu->mutex) {
+    if (pmu->mutex == NULL) {
         err = -ENOMEM;
         goto err;
     }
@@ -226,7 +226,7 @@ static int nvgpu_init_pmu_setup_sw(struct gk20a *g)
 
     pmu->seq = nvgpu_kzalloc(g, PMU_MAX_NUM_SEQUENCES *
         sizeof(struct pmu_sequence));
-    if (!pmu->seq) {
+    if (pmu->seq == NULL) {
         err = -ENOMEM;
         goto err_free_mutex;
     }
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_fw.c b/drivers/gpu/nvgpu/common/pmu/pmu_fw.c
index bf54e0d6..a94453fb 100644
--- a/drivers/gpu/nvgpu/common/pmu/pmu_fw.c
+++ b/drivers/gpu/nvgpu/common/pmu/pmu_fw.c
@@ -1738,12 +1738,12 @@ int nvgpu_pmu_prepare_ns_ucode_blob(struct gk20a *g)
 
     nvgpu_log_fn(g, " ");
 
-    if (pmu->fw) {
+    if (pmu->fw != NULL) {
         return nvgpu_init_pmu_fw_support(pmu);
     }
 
     pmu->fw = nvgpu_request_firmware(g, NVGPU_PMU_NS_UCODE_IMAGE, 0);
-    if (!pmu->fw) {
+    if (pmu->fw == NULL) {
         nvgpu_err(g, "failed to load pmu ucode!!");
         return err;
     }
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c b/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c
index 9fe999ae..6f88260f 100644
--- a/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c
+++ b/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c
@@ -221,7 +221,7 @@ invalid_cmd:
         "payload in=%p, in_size=%d, in_offset=%d,\n"
         "payload out=%p, out_size=%d, out_offset=%d",
         queue_id, cmd->hdr.size, cmd->hdr.unit_id,
-        msg, msg ? msg->hdr.unit_id : ~0,
+        msg, (msg != NULL) ? msg->hdr.unit_id : ~0,
         &payload->in, payload->in.size, payload->in.offset,
         &payload->out, payload->out.size, payload->out.offset);
 
@@ -243,7 +243,7 @@ static int pmu_write_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd,
 
     do {
         err = nvgpu_flcn_queue_push(pmu->flcn, queue, cmd, cmd->hdr.size);
-        if (err == -EAGAIN && !nvgpu_timeout_expired(&timeout)) {
+        if (err == -EAGAIN && nvgpu_timeout_expired(&timeout) == 0) {
             nvgpu_usleep_range(1000, 2000);
         } else {
             break;
@@ -273,7 +273,7 @@ static int pmu_cmd_payload_extract_rpc(struct gk20a *g, struct pmu_cmd *cmd,
     dmem_alloc_size = payload->rpc.size_rpc +
         payload->rpc.size_scratch;
     dmem_alloc_offset = nvgpu_alloc(&pmu->dmem, dmem_alloc_size);
-    if (!dmem_alloc_offset) {
+    if (dmem_alloc_offset == 0U) {
         err = -ENOMEM;
         goto clean_up;
     }
@@ -312,11 +312,11 @@ static int pmu_cmd_payload_extract(struct gk20a *g, struct pmu_cmd *cmd,
 
     nvgpu_log_fn(g, " ");
 
-    if (payload) {
+    if (payload != NULL) {
         seq->out_payload = payload->out.buf;
     }
 
-    if (payload && payload->in.offset != 0U) {
+    if (payload != NULL && payload->in.offset != 0U) {
         pv->set_pmu_allocation_ptr(pmu, &in,
             ((u8 *)&cmd->cmd + payload->in.offset));
 
@@ -331,14 +331,14 @@ static int pmu_cmd_payload_extract(struct gk20a *g, struct pmu_cmd *cmd,
         *(pv->pmu_allocation_get_dmem_offset_addr(pmu, in)) =
             nvgpu_alloc(&pmu->dmem,
             pv->pmu_allocation_get_dmem_size(pmu, in));
-        if (!*(pv->pmu_allocation_get_dmem_offset_addr(pmu, in))) {
+        if (*(pv->pmu_allocation_get_dmem_offset_addr(pmu, in)) == 0U) {
             goto clean_up;
         }
 
         if (payload->in.fb_size != 0x0U) {
             seq->in_mem = nvgpu_kzalloc(g,
                 sizeof(struct nvgpu_mem));
-            if (!seq->in_mem) {
+            if (seq->in_mem == NULL) {
                 err = -ENOMEM;
                 goto clean_up;
             }
@@ -365,7 +365,7 @@ static int pmu_cmd_payload_extract(struct gk20a *g, struct pmu_cmd *cmd,
             pv->pmu_allocation_get_dmem_offset(pmu, in));
     }
 
-    if (payload && payload->out.offset != 0U) {
+    if (payload != NULL && payload->out.offset != 0U) {
         pv->set_pmu_allocation_ptr(pmu, &out,
             ((u8 *)&cmd->cmd + payload->out.offset));
         pv->pmu_allocation_set_dmem_size(pmu, out,
@@ -376,15 +376,15 @@ static int pmu_cmd_payload_extract(struct gk20a *g, struct pmu_cmd *cmd,
             nvgpu_alloc(&pmu->dmem,
             pv->pmu_allocation_get_dmem_size(pmu,
             out));
-        if (!*(pv->pmu_allocation_get_dmem_offset_addr(pmu,
-            out))) {
+        if (*(pv->pmu_allocation_get_dmem_offset_addr(pmu,
+            out)) == 0U) {
             goto clean_up;
         }
 
         if (payload->out.fb_size != 0x0U) {
             seq->out_mem = nvgpu_kzalloc(g,
                 sizeof(struct nvgpu_mem));
-            if (!seq->out_mem) {
+            if (seq->out_mem == NULL) {
                 err = -ENOMEM;
                 goto clean_up;
             }
@@ -439,16 +439,16 @@ int nvgpu_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd,
 
     nvgpu_log_fn(g, " ");
 
-    if ((!cmd) || (!seq_desc) || (!pmu->pmu_ready)) {
-        if (!cmd) {
+    if (cmd == NULL || seq_desc == NULL || !pmu->pmu_ready) {
+        if (cmd == NULL) {
             nvgpu_warn(g, "%s(): PMU cmd buffer is NULL", __func__);
-        } else if (!seq_desc) {
+        } else if (seq_desc == NULL) {
             nvgpu_warn(g, "%s(): Seq descriptor is NULL", __func__);
         } else {
             nvgpu_warn(g, "%s(): PMU is not ready", __func__);
         }
 
-        WARN_ON(1);
+        WARN_ON(true);
         return -EINVAL;
     }
 
@@ -612,7 +612,7 @@ static int pmu_handle_event(struct nvgpu_pmu *pmu, struct pmu_msg *msg)
             err = g->ops.perf.handle_pmu_perf_event(g,
                 (void *)&msg->msg.perf);
         } else {
-            WARN_ON(1);
+            WARN_ON(true);
         }
         break;
     case PMU_UNIT_THERM:
@@ -641,7 +641,7 @@ static bool pmu_read_message(struct nvgpu_pmu *pmu,
 
     err = nvgpu_flcn_queue_pop(pmu->flcn, queue, &msg->hdr,
         PMU_MSG_HDR_SIZE, &bytes_read);
-    if (err || bytes_read != PMU_MSG_HDR_SIZE) {
+    if (err != 0 || bytes_read != PMU_MSG_HDR_SIZE) {
         nvgpu_err(g, "fail to read msg from queue %d", queue->id);
         *status = err | -EINVAL;
         goto clean_up;
@@ -657,7 +657,7 @@ static bool pmu_read_message(struct nvgpu_pmu *pmu,
         /* read again after rewind */
         err = nvgpu_flcn_queue_pop(pmu->flcn, queue, &msg->hdr,
             PMU_MSG_HDR_SIZE, &bytes_read);
-        if (err || bytes_read != PMU_MSG_HDR_SIZE) {
+        if (err != 0 || bytes_read != PMU_MSG_HDR_SIZE) {
             nvgpu_err(g,
                 "fail to read msg from queue %d", queue->id);
             *status = err | -EINVAL;
@@ -676,7 +676,7 @@ static bool pmu_read_message(struct nvgpu_pmu *pmu,
     read_size = msg->hdr.size - PMU_MSG_HDR_SIZE;
     err = nvgpu_flcn_queue_pop(pmu->flcn, queue, &msg->msg,
         read_size, &bytes_read);
-    if (err || bytes_read != read_size) {
+    if (err != 0 || bytes_read != read_size) {
         nvgpu_err(g,
             "fail to read msg from queue %d", queue->id);
         *status = err;
@@ -750,7 +750,7 @@ int pmu_wait_message_cond(struct nvgpu_pmu *pmu, u32 timeout_ms,
 
         nvgpu_usleep_range(delay, delay * 2U);
         delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
-    } while (!nvgpu_timeout_expired(&timeout));
+    } while (nvgpu_timeout_expired(&timeout) == 0);
 
     return -ETIMEDOUT;
 }
@@ -887,7 +887,7 @@ int nvgpu_pmu_rpc_execute(struct nvgpu_pmu *pmu, struct nv_pmu_rpc_header *rpc,
     if (caller_cb == NULL) {
         rpc_payload = nvgpu_kzalloc(g,
             sizeof(struct rpc_handler_payload) + size_rpc);
-        if (!rpc_payload) {
+        if (rpc_payload == NULL) {
             status = ENOMEM;
             goto exit;
         }
@@ -907,7 +907,7 @@ int nvgpu_pmu_rpc_execute(struct nvgpu_pmu *pmu, struct nv_pmu_rpc_header *rpc,
         }
         rpc_payload = nvgpu_kzalloc(g,
             sizeof(struct rpc_handler_payload));
-        if (!rpc_payload) {
+        if (rpc_payload == NULL) {
             status = ENOMEM;
             goto exit;
         }
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_perfmon.c b/drivers/gpu/nvgpu/common/pmu/pmu_perfmon.c
index a99e86ce..12ab4422 100644
--- a/drivers/gpu/nvgpu/common/pmu/pmu_perfmon.c
+++ b/drivers/gpu/nvgpu/common/pmu/pmu_perfmon.c
@@ -51,7 +51,7 @@ static u8 get_perfmon_id(struct nvgpu_pmu *pmu)
     default:
         unit_id = PMU_UNIT_INVALID;
         nvgpu_err(g, "no support for %x", ver);
-        WARN_ON(1);
+        WARN_ON(true);
     }
 
     return unit_id;
@@ -75,11 +75,11 @@ int nvgpu_pmu_init_perfmon(struct nvgpu_pmu *pmu)
 
     g->ops.pmu.pmu_init_perfmon_counter(g);
 
-    if (!pmu->sample_buffer) {
+    if (pmu->sample_buffer == 0U) {
         pmu->sample_buffer = nvgpu_alloc(&pmu->dmem,
             2U * sizeof(u16));
     }
-    if (!pmu->sample_buffer) {
+    if (pmu->sample_buffer == 0U) {
         nvgpu_err(g, "failed to allocate perfmon sample buffer");
         return -ENOMEM;
     }
@@ -240,7 +240,7 @@ int nvgpu_pmu_load_update(struct gk20a *g)
 void nvgpu_pmu_get_load_counters(struct gk20a *g, u32 *busy_cycles,
     u32 *total_cycles)
 {
-    if (!g->power_on || gk20a_busy(g)) {
+    if (!g->power_on || gk20a_busy(g) != 0) {
         *busy_cycles = 0;
         *total_cycles = 0;
         return;
@@ -254,7 +254,7 @@ void nvgpu_pmu_get_load_counters(struct gk20a *g, u32 *busy_cycles,
 
 void nvgpu_pmu_reset_load_counters(struct gk20a *g)
 {
-    if (!g->power_on || gk20a_busy(g)) {
+    if (!g->power_on || gk20a_busy(g) != 0) {
         return;
     }
 
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_pg.c b/drivers/gpu/nvgpu/common/pmu/pmu_pg.c
index 0758279d..d2615b1a 100644
--- a/drivers/gpu/nvgpu/common/pmu/pmu_pg.c
+++ b/drivers/gpu/nvgpu/common/pmu/pmu_pg.c
@@ -89,9 +89,9 @@ static void pmu_handle_pg_elpg_msg(struct gk20a *g, struct pmu_msg *msg,
     }
 
     if (pmu->pmu_state == PMU_STATE_ELPG_BOOTING) {
-        if (g->ops.pmu.pmu_pg_engines_feature_list &&
+        if (g->ops.pmu.pmu_pg_engines_feature_list != NULL &&
             g->ops.pmu.pmu_pg_engines_feature_list(g,
             PMU_PG_ELPG_ENGINE_ID_GRAPHICS) !=
             NVGPU_PMU_GR_FEATURE_MASK_POWER_GATING) {
             pmu->initialized = true;
             nvgpu_pmu_state_change(g, PMU_STATE_STARTED,
@@ -117,9 +117,9 @@ int nvgpu_pmu_pg_global_enable(struct gk20a *g, u32 enable_pg)
     u32 status = 0;
 
     if (enable_pg == true) {
-        if (g->ops.pmu.pmu_pg_engines_feature_list &&
+        if (g->ops.pmu.pmu_pg_engines_feature_list != NULL &&
             g->ops.pmu.pmu_pg_engines_feature_list(g,
             PMU_PG_ELPG_ENGINE_ID_GRAPHICS) !=
             NVGPU_PMU_GR_FEATURE_MASK_POWER_GATING) {
             if (g->ops.pmu.pmu_lpwr_enable_pg) {
                 status = g->ops.pmu.pmu_lpwr_enable_pg(g,
@@ -129,9 +129,9 @@ int nvgpu_pmu_pg_global_enable(struct gk20a *g, u32 enable_pg)
             status = nvgpu_pmu_enable_elpg(g);
         }
     } else if (enable_pg == false) {
-        if (g->ops.pmu.pmu_pg_engines_feature_list &&
+        if (g->ops.pmu.pmu_pg_engines_feature_list != NULL &&
             g->ops.pmu.pmu_pg_engines_feature_list(g,
             PMU_PG_ELPG_ENGINE_ID_GRAPHICS) !=
             NVGPU_PMU_GR_FEATURE_MASK_POWER_GATING) {
             if (g->ops.pmu.pmu_lpwr_disable_pg) {
                 status = g->ops.pmu.pmu_lpwr_disable_pg(g,
@@ -207,7 +207,7 @@ int nvgpu_pmu_enable_elpg(struct gk20a *g)
         nvgpu_warn(g,
             "%s(): possible elpg refcnt mismatch. elpg refcnt=%d",
             __func__, pmu->elpg_refcnt);
-        WARN_ON(1);
+        WARN_ON(true);
     }
 
     /* do NOT enable elpg until golden ctx is created,
@@ -273,7 +273,7 @@ int nvgpu_pmu_disable_elpg(struct gk20a *g)
         nvgpu_warn(g,
             "%s(): possible elpg refcnt mismatch. elpg refcnt=%d",
             __func__, pmu->elpg_refcnt);
-        WARN_ON(1);
+        WARN_ON(true);
         ret = 0;
         goto exit_unlock;
     }
@@ -481,7 +481,8 @@ int nvgpu_pmu_init_powergating(struct gk20a *g)
         pg_engine_id++) {
 
         if (BIT(pg_engine_id) & pg_engine_id_list) {
-            if (pmu && pmu->pmu_state == PMU_STATE_INIT_RECEIVED) {
+            if (pmu != NULL &&
+                pmu->pmu_state == PMU_STATE_INIT_RECEIVED) {
                 nvgpu_pmu_state_change(g,
                     PMU_STATE_ELPG_BOOTING, false);
             }
@@ -636,9 +637,9 @@ static void ap_callback_init_and_enable_ctrl(
     void *param, u32 seq_desc, u32 status)
 {
     /* Define p_ap (i.e pointer to pmu_ap structure) */
-    WARN_ON(!msg);
+    WARN_ON(msg == NULL);
 
-    if (!status) {
+    if (status == 0U) {
         switch (msg->msg.pg.ap_msg.cmn.msg_id) {
         case PMU_AP_MSG_ID_INIT_ACK:
             nvgpu_pmu_dbg(g, "reply PMU_AP_CMD_ID_INIT");