summary | refs | log | tree | commit | diff | stats
path: root/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c
diff options
context:
space:
mode:
authorAmulya <Amurthyreddy@nvidia.com>2018-08-28 03:04:55 -0400
committermobile promotions <svcmobile_promotions@nvidia.com>2018-09-19 06:24:12 -0400
commit941ac9a9d07bedb4062fd0c4d32eb2ef80a42359 (patch)
treec53622d96a4c2e7c18693ecf4059d7e403cd7808 /drivers/gpu/nvgpu/common/pmu/pmu_ipc.c
parent2805f03aa0496502b64ff760f667bfe9d8a27928 (diff)
nvgpu: common: MISRA 10.1 boolean fixes
Fix violations where a variable of type non-boolean is used as a boolean in gpu/nvgpu/common.

JIRA NVGPU-646

Change-Id: I9773d863b715f83ae1772b75d5373f77244bc8ca
Signed-off-by: Amulya <Amurthyreddy@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1807132
GVS: Gerrit_Virtual_Submit
Tested-by: Amulya Murthyreddy <amurthyreddy@nvidia.com>
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/common/pmu/pmu_ipc.c')
-rw-r--r--  drivers/gpu/nvgpu/common/pmu/pmu_ipc.c  44
1 files changed, 22 insertions, 22 deletions
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c b/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c
index 9fe999ae..6f88260f 100644
--- a/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c
+++ b/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c
@@ -221,7 +221,7 @@ invalid_cmd:
221 "payload in=%p, in_size=%d, in_offset=%d,\n" 221 "payload in=%p, in_size=%d, in_offset=%d,\n"
222 "payload out=%p, out_size=%d, out_offset=%d", 222 "payload out=%p, out_size=%d, out_offset=%d",
223 queue_id, cmd->hdr.size, cmd->hdr.unit_id, 223 queue_id, cmd->hdr.size, cmd->hdr.unit_id,
224 msg, msg ? msg->hdr.unit_id : ~0, 224 msg, (msg != NULL) ? msg->hdr.unit_id : ~0,
225 &payload->in, payload->in.size, payload->in.offset, 225 &payload->in, payload->in.size, payload->in.offset,
226 &payload->out, payload->out.size, payload->out.offset); 226 &payload->out, payload->out.size, payload->out.offset);
227 227
@@ -243,7 +243,7 @@ static int pmu_write_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd,
243 243
244 do { 244 do {
245 err = nvgpu_flcn_queue_push(pmu->flcn, queue, cmd, cmd->hdr.size); 245 err = nvgpu_flcn_queue_push(pmu->flcn, queue, cmd, cmd->hdr.size);
246 if (err == -EAGAIN && !nvgpu_timeout_expired(&timeout)) { 246 if (err == -EAGAIN && nvgpu_timeout_expired(&timeout) == 0) {
247 nvgpu_usleep_range(1000, 2000); 247 nvgpu_usleep_range(1000, 2000);
248 } else { 248 } else {
249 break; 249 break;
@@ -273,7 +273,7 @@ static int pmu_cmd_payload_extract_rpc(struct gk20a *g, struct pmu_cmd *cmd,
273 dmem_alloc_size = payload->rpc.size_rpc + 273 dmem_alloc_size = payload->rpc.size_rpc +
274 payload->rpc.size_scratch; 274 payload->rpc.size_scratch;
275 dmem_alloc_offset = nvgpu_alloc(&pmu->dmem, dmem_alloc_size); 275 dmem_alloc_offset = nvgpu_alloc(&pmu->dmem, dmem_alloc_size);
276 if (!dmem_alloc_offset) { 276 if (dmem_alloc_offset == 0U) {
277 err = -ENOMEM; 277 err = -ENOMEM;
278 goto clean_up; 278 goto clean_up;
279 } 279 }
@@ -312,11 +312,11 @@ static int pmu_cmd_payload_extract(struct gk20a *g, struct pmu_cmd *cmd,
312 312
313 nvgpu_log_fn(g, " "); 313 nvgpu_log_fn(g, " ");
314 314
315 if (payload) { 315 if (payload != NULL) {
316 seq->out_payload = payload->out.buf; 316 seq->out_payload = payload->out.buf;
317 } 317 }
318 318
319 if (payload && payload->in.offset != 0U) { 319 if (payload != NULL && payload->in.offset != 0U) {
320 pv->set_pmu_allocation_ptr(pmu, &in, 320 pv->set_pmu_allocation_ptr(pmu, &in,
321 ((u8 *)&cmd->cmd + payload->in.offset)); 321 ((u8 *)&cmd->cmd + payload->in.offset));
322 322
@@ -331,14 +331,14 @@ static int pmu_cmd_payload_extract(struct gk20a *g, struct pmu_cmd *cmd,
331 *(pv->pmu_allocation_get_dmem_offset_addr(pmu, in)) = 331 *(pv->pmu_allocation_get_dmem_offset_addr(pmu, in)) =
332 nvgpu_alloc(&pmu->dmem, 332 nvgpu_alloc(&pmu->dmem,
333 pv->pmu_allocation_get_dmem_size(pmu, in)); 333 pv->pmu_allocation_get_dmem_size(pmu, in));
334 if (!*(pv->pmu_allocation_get_dmem_offset_addr(pmu, in))) { 334 if (*(pv->pmu_allocation_get_dmem_offset_addr(pmu, in)) == 0U) {
335 goto clean_up; 335 goto clean_up;
336 } 336 }
337 337
338 if (payload->in.fb_size != 0x0U) { 338 if (payload->in.fb_size != 0x0U) {
339 seq->in_mem = nvgpu_kzalloc(g, 339 seq->in_mem = nvgpu_kzalloc(g,
340 sizeof(struct nvgpu_mem)); 340 sizeof(struct nvgpu_mem));
341 if (!seq->in_mem) { 341 if (seq->in_mem == NULL) {
342 err = -ENOMEM; 342 err = -ENOMEM;
343 goto clean_up; 343 goto clean_up;
344 } 344 }
@@ -365,7 +365,7 @@ static int pmu_cmd_payload_extract(struct gk20a *g, struct pmu_cmd *cmd,
365 pv->pmu_allocation_get_dmem_offset(pmu, in)); 365 pv->pmu_allocation_get_dmem_offset(pmu, in));
366 } 366 }
367 367
368 if (payload && payload->out.offset != 0U) { 368 if (payload != NULL && payload->out.offset != 0U) {
369 pv->set_pmu_allocation_ptr(pmu, &out, 369 pv->set_pmu_allocation_ptr(pmu, &out,
370 ((u8 *)&cmd->cmd + payload->out.offset)); 370 ((u8 *)&cmd->cmd + payload->out.offset));
371 pv->pmu_allocation_set_dmem_size(pmu, out, 371 pv->pmu_allocation_set_dmem_size(pmu, out,
@@ -376,15 +376,15 @@ static int pmu_cmd_payload_extract(struct gk20a *g, struct pmu_cmd *cmd,
376 nvgpu_alloc(&pmu->dmem, 376 nvgpu_alloc(&pmu->dmem,
377 pv->pmu_allocation_get_dmem_size(pmu, 377 pv->pmu_allocation_get_dmem_size(pmu,
378 out)); 378 out));
379 if (!*(pv->pmu_allocation_get_dmem_offset_addr(pmu, 379 if (*(pv->pmu_allocation_get_dmem_offset_addr(pmu,
380 out))) { 380 out)) == 0U) {
381 goto clean_up; 381 goto clean_up;
382 } 382 }
383 383
384 if (payload->out.fb_size != 0x0U) { 384 if (payload->out.fb_size != 0x0U) {
385 seq->out_mem = nvgpu_kzalloc(g, 385 seq->out_mem = nvgpu_kzalloc(g,
386 sizeof(struct nvgpu_mem)); 386 sizeof(struct nvgpu_mem));
387 if (!seq->out_mem) { 387 if (seq->out_mem == NULL) {
388 err = -ENOMEM; 388 err = -ENOMEM;
389 goto clean_up; 389 goto clean_up;
390 } 390 }
@@ -439,16 +439,16 @@ int nvgpu_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd,
439 439
440 nvgpu_log_fn(g, " "); 440 nvgpu_log_fn(g, " ");
441 441
442 if ((!cmd) || (!seq_desc) || (!pmu->pmu_ready)) { 442 if (cmd == NULL || seq_desc == NULL || !pmu->pmu_ready) {
443 if (!cmd) { 443 if (cmd == NULL) {
444 nvgpu_warn(g, "%s(): PMU cmd buffer is NULL", __func__); 444 nvgpu_warn(g, "%s(): PMU cmd buffer is NULL", __func__);
445 } else if (!seq_desc) { 445 } else if (seq_desc == NULL) {
446 nvgpu_warn(g, "%s(): Seq descriptor is NULL", __func__); 446 nvgpu_warn(g, "%s(): Seq descriptor is NULL", __func__);
447 } else { 447 } else {
448 nvgpu_warn(g, "%s(): PMU is not ready", __func__); 448 nvgpu_warn(g, "%s(): PMU is not ready", __func__);
449 } 449 }
450 450
451 WARN_ON(1); 451 WARN_ON(true);
452 return -EINVAL; 452 return -EINVAL;
453 } 453 }
454 454
@@ -612,7 +612,7 @@ static int pmu_handle_event(struct nvgpu_pmu *pmu, struct pmu_msg *msg)
612 err = g->ops.perf.handle_pmu_perf_event(g, 612 err = g->ops.perf.handle_pmu_perf_event(g,
613 (void *)&msg->msg.perf); 613 (void *)&msg->msg.perf);
614 } else { 614 } else {
615 WARN_ON(1); 615 WARN_ON(true);
616 } 616 }
617 break; 617 break;
618 case PMU_UNIT_THERM: 618 case PMU_UNIT_THERM:
@@ -641,7 +641,7 @@ static bool pmu_read_message(struct nvgpu_pmu *pmu,
641 641
642 err = nvgpu_flcn_queue_pop(pmu->flcn, queue, &msg->hdr, 642 err = nvgpu_flcn_queue_pop(pmu->flcn, queue, &msg->hdr,
643 PMU_MSG_HDR_SIZE, &bytes_read); 643 PMU_MSG_HDR_SIZE, &bytes_read);
644 if (err || bytes_read != PMU_MSG_HDR_SIZE) { 644 if (err != 0 || bytes_read != PMU_MSG_HDR_SIZE) {
645 nvgpu_err(g, "fail to read msg from queue %d", queue->id); 645 nvgpu_err(g, "fail to read msg from queue %d", queue->id);
646 *status = err | -EINVAL; 646 *status = err | -EINVAL;
647 goto clean_up; 647 goto clean_up;
@@ -657,7 +657,7 @@ static bool pmu_read_message(struct nvgpu_pmu *pmu,
657 /* read again after rewind */ 657 /* read again after rewind */
658 err = nvgpu_flcn_queue_pop(pmu->flcn, queue, &msg->hdr, 658 err = nvgpu_flcn_queue_pop(pmu->flcn, queue, &msg->hdr,
659 PMU_MSG_HDR_SIZE, &bytes_read); 659 PMU_MSG_HDR_SIZE, &bytes_read);
660 if (err || bytes_read != PMU_MSG_HDR_SIZE) { 660 if (err != 0 || bytes_read != PMU_MSG_HDR_SIZE) {
661 nvgpu_err(g, 661 nvgpu_err(g,
662 "fail to read msg from queue %d", queue->id); 662 "fail to read msg from queue %d", queue->id);
663 *status = err | -EINVAL; 663 *status = err | -EINVAL;
@@ -676,7 +676,7 @@ static bool pmu_read_message(struct nvgpu_pmu *pmu,
676 read_size = msg->hdr.size - PMU_MSG_HDR_SIZE; 676 read_size = msg->hdr.size - PMU_MSG_HDR_SIZE;
677 err = nvgpu_flcn_queue_pop(pmu->flcn, queue, &msg->msg, 677 err = nvgpu_flcn_queue_pop(pmu->flcn, queue, &msg->msg,
678 read_size, &bytes_read); 678 read_size, &bytes_read);
679 if (err || bytes_read != read_size) { 679 if (err != 0 || bytes_read != read_size) {
680 nvgpu_err(g, 680 nvgpu_err(g,
681 "fail to read msg from queue %d", queue->id); 681 "fail to read msg from queue %d", queue->id);
682 *status = err; 682 *status = err;
@@ -750,7 +750,7 @@ int pmu_wait_message_cond(struct nvgpu_pmu *pmu, u32 timeout_ms,
750 750
751 nvgpu_usleep_range(delay, delay * 2U); 751 nvgpu_usleep_range(delay, delay * 2U);
752 delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX); 752 delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
753 } while (!nvgpu_timeout_expired(&timeout)); 753 } while (nvgpu_timeout_expired(&timeout) == 0);
754 754
755 return -ETIMEDOUT; 755 return -ETIMEDOUT;
756} 756}
@@ -887,7 +887,7 @@ int nvgpu_pmu_rpc_execute(struct nvgpu_pmu *pmu, struct nv_pmu_rpc_header *rpc,
887 if (caller_cb == NULL) { 887 if (caller_cb == NULL) {
888 rpc_payload = nvgpu_kzalloc(g, 888 rpc_payload = nvgpu_kzalloc(g,
889 sizeof(struct rpc_handler_payload) + size_rpc); 889 sizeof(struct rpc_handler_payload) + size_rpc);
890 if (!rpc_payload) { 890 if (rpc_payload == NULL) {
891 status = ENOMEM; 891 status = ENOMEM;
892 goto exit; 892 goto exit;
893 } 893 }
@@ -907,7 +907,7 @@ int nvgpu_pmu_rpc_execute(struct nvgpu_pmu *pmu, struct nv_pmu_rpc_header *rpc,
907 } 907 }
908 rpc_payload = nvgpu_kzalloc(g, 908 rpc_payload = nvgpu_kzalloc(g,
909 sizeof(struct rpc_handler_payload)); 909 sizeof(struct rpc_handler_payload));
910 if (!rpc_payload) { 910 if (rpc_payload == NULL) {
911 status = ENOMEM; 911 status = ENOMEM;
912 goto exit; 912 goto exit;
913 } 913 }