path: root/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c
author		Srirangan <smadhavan@nvidia.com>	2018-08-14 05:29:27 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2018-08-21 18:44:28 -0400
commit		e988951ccab1031022ac354bbe8f53e1dc849b7a (patch)
tree		7fe8d7fa8b46f501c2e1a873b84873a5173478d5 /drivers/gpu/nvgpu/common/pmu/pmu_ipc.c
parent		652da8116966af2a8438a9a9f135a11b4e5c6c7b (diff)
gpu: nvgpu: common: pmu: Fix MISRA 15.6 violations
MISRA Rule-15.6 requires that all if-else blocks be enclosed in braces,
including single statement blocks. Fix errors due to single statement
if blocks without braces, introducing the braces.

JIRA NVGPU-671

Change-Id: I497fbdb07bb2ec5a404046f06db3c713b3859e8e
Signed-off-by: Srirangan <smadhavan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1799525
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
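For readers unfamiliar with the rule, the following is a minimal standalone sketch of the pattern this change enforces; the check_size helpers are hypothetical illustrations, not code from this driver.

/*
 * MISRA C:2012 Rule 15.6: the body of an if/else (and of loops) must be
 * a compound statement, so even single-statement branches gain braces.
 */

/* Non-compliant: the branch body is a bare statement. */
static int check_size_noncompliant(unsigned int size, unsigned int limit)
{
	if (size > limit)
		return -1;

	return 0;
}

/* Compliant: the same logic with the branch enclosed in braces. */
static int check_size_compliant(unsigned int size, unsigned int limit)
{
	if (size > limit) {
		return -1;
	}

	return 0;
}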
Diffstat (limited to 'drivers/gpu/nvgpu/common/pmu/pmu_ipc.c')
-rw-r--r--	drivers/gpu/nvgpu/common/pmu/pmu_ipc.c	132
1 file changed, 85 insertions, 47 deletions
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c b/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c
index 37abb34c..39be07cc 100644
--- a/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c
+++ b/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c
@@ -154,33 +154,41 @@ static bool pmu_validate_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd,
 	struct nvgpu_falcon_queue *queue;
 	u32 in_size, out_size;
 
-	if (!PMU_IS_SW_COMMAND_QUEUE(queue_id))
+	if (!PMU_IS_SW_COMMAND_QUEUE(queue_id)) {
 		goto invalid_cmd;
+	}
 
 	queue = &pmu->queue[queue_id];
-	if (cmd->hdr.size < PMU_CMD_HDR_SIZE)
+	if (cmd->hdr.size < PMU_CMD_HDR_SIZE) {
 		goto invalid_cmd;
+	}
 
-	if (cmd->hdr.size > (queue->size >> 1))
+	if (cmd->hdr.size > (queue->size >> 1)) {
 		goto invalid_cmd;
+	}
 
-	if (msg != NULL && msg->hdr.size < PMU_MSG_HDR_SIZE)
+	if (msg != NULL && msg->hdr.size < PMU_MSG_HDR_SIZE) {
 		goto invalid_cmd;
+	}
 
-	if (!PMU_UNIT_ID_IS_VALID(cmd->hdr.unit_id))
+	if (!PMU_UNIT_ID_IS_VALID(cmd->hdr.unit_id)) {
 		goto invalid_cmd;
+	}
 
-	if (payload == NULL)
+	if (payload == NULL) {
 		return true;
+	}
 
 	if (payload->in.buf == NULL && payload->out.buf == NULL &&
-		payload->rpc.prpc == NULL)
+		payload->rpc.prpc == NULL) {
 		goto invalid_cmd;
+	}
 
 	if ((payload->in.buf != NULL && payload->in.size == 0) ||
 		(payload->out.buf != NULL && payload->out.size == 0) ||
-		(payload->rpc.prpc != NULL && payload->rpc.size_rpc == 0))
+		(payload->rpc.prpc != NULL && payload->rpc.size_rpc == 0)) {
 		goto invalid_cmd;
+	}
 
 	in_size = PMU_CMD_HDR_SIZE;
 	if (payload->in.buf) {
@@ -194,13 +202,15 @@ static bool pmu_validate_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd,
 		out_size += g->ops.pmu_ver.get_pmu_allocation_struct_size(pmu);
 	}
 
-	if (in_size > cmd->hdr.size || out_size > cmd->hdr.size)
+	if (in_size > cmd->hdr.size || out_size > cmd->hdr.size) {
 		goto invalid_cmd;
+	}
 
 
 	if ((payload->in.offset != 0 && payload->in.buf == NULL) ||
-		(payload->out.offset != 0 && payload->out.buf == NULL))
+		(payload->out.offset != 0 && payload->out.buf == NULL)) {
 		goto invalid_cmd;
+	}
 
 	return true;
 
@@ -233,16 +243,18 @@ static int pmu_write_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd,
 
 	do {
 		err = nvgpu_flcn_queue_push(pmu->flcn, queue, cmd, cmd->hdr.size);
-		if (err == -EAGAIN && !nvgpu_timeout_expired(&timeout))
+		if (err == -EAGAIN && !nvgpu_timeout_expired(&timeout)) {
 			nvgpu_usleep_range(1000, 2000);
-		else
+		} else {
 			break;
+		}
 	} while (1);
 
-	if (err)
+	if (err) {
 		nvgpu_err(g, "fail to write cmd to queue %d", queue_id);
-	else
+	} else {
 		nvgpu_log_fn(g, "done");
+	}
 
 	return err;
 }
@@ -281,10 +293,11 @@ static int pmu_cmd_payload_extract_rpc(struct gk20a *g, struct pmu_cmd *cmd,
 			dmem_alloc_offset);
 
 clean_up:
-	if (err)
+	if (err) {
 		nvgpu_log_fn(g, "fail");
-	else
+	} else {
 		nvgpu_log_fn(g, "done");
+	}
 
 	return err;
 }
@@ -299,25 +312,28 @@ static int pmu_cmd_payload_extract(struct gk20a *g, struct pmu_cmd *cmd,
 
 	nvgpu_log_fn(g, " ");
 
-	if (payload)
+	if (payload) {
 		seq->out_payload = payload->out.buf;
+	}
 
 	if (payload && payload->in.offset != 0) {
 		pv->set_pmu_allocation_ptr(pmu, &in,
 			((u8 *)&cmd->cmd + payload->in.offset));
 
-		if (payload->in.buf != payload->out.buf)
+		if (payload->in.buf != payload->out.buf) {
 			pv->pmu_allocation_set_dmem_size(pmu, in,
 				(u16)payload->in.size);
-		else
+		} else {
 			pv->pmu_allocation_set_dmem_size(pmu, in,
 				(u16)max(payload->in.size, payload->out.size));
+		}
 
 		*(pv->pmu_allocation_get_dmem_offset_addr(pmu, in)) =
 			nvgpu_alloc(&pmu->dmem,
 			pv->pmu_allocation_get_dmem_size(pmu, in));
-		if (!*(pv->pmu_allocation_get_dmem_offset_addr(pmu, in)))
+		if (!*(pv->pmu_allocation_get_dmem_offset_addr(pmu, in))) {
 			goto clean_up;
+		}
 
 		if (payload->in.fb_size != 0x0) {
 			seq->in_mem = nvgpu_kzalloc(g,
@@ -361,8 +377,9 @@ static int pmu_cmd_payload_extract(struct gk20a *g, struct pmu_cmd *cmd,
 				pv->pmu_allocation_get_dmem_size(pmu,
 				out));
 		if (!*(pv->pmu_allocation_get_dmem_offset_addr(pmu,
-				out)))
+				out))) {
 			goto clean_up;
+		}
 
 		if (payload->out.fb_size != 0x0) {
 			seq->out_mem = nvgpu_kzalloc(g,
@@ -396,14 +413,17 @@ static int pmu_cmd_payload_extract(struct gk20a *g, struct pmu_cmd *cmd,
 clean_up:
 	if (err) {
 		nvgpu_log_fn(g, "fail");
-		if (in)
+		if (in) {
 			nvgpu_free(&pmu->dmem,
 				pv->pmu_allocation_get_dmem_offset(pmu, in));
-		if (out)
+		}
+		if (out) {
 			nvgpu_free(&pmu->dmem,
 				pv->pmu_allocation_get_dmem_offset(pmu, out));
-	} else
+		}
+	} else {
 		nvgpu_log_fn(g, "done");
+	}
 
 	return err;
 }
@@ -420,23 +440,26 @@ int nvgpu_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd,
 	nvgpu_log_fn(g, " ");
 
 	if ((!cmd) || (!seq_desc) || (!pmu->pmu_ready)) {
-		if (!cmd)
+		if (!cmd) {
 			nvgpu_warn(g, "%s(): PMU cmd buffer is NULL", __func__);
-		else if (!seq_desc)
+		} else if (!seq_desc) {
 			nvgpu_warn(g, "%s(): Seq descriptor is NULL", __func__);
-		else
+		} else {
 			nvgpu_warn(g, "%s(): PMU is not ready", __func__);
+		}
 
 		WARN_ON(1);
 		return -EINVAL;
 	}
 
-	if (!pmu_validate_cmd(pmu, cmd, msg, payload, queue_id))
+	if (!pmu_validate_cmd(pmu, cmd, msg, payload, queue_id)) {
 		return -EINVAL;
+	}
 
 	err = pmu_seq_acquire(pmu, &seq);
-	if (err)
+	if (err) {
 		return err;
+	}
 
 	cmd->hdr.seq_id = seq->id;
 
@@ -452,19 +475,22 @@ int nvgpu_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd,
 
 	*seq_desc = seq->desc;
 
-	if (cmd->cmd.rpc.cmd_type == NV_PMU_RPC_CMD_ID)
+	if (cmd->cmd.rpc.cmd_type == NV_PMU_RPC_CMD_ID) {
 		err = pmu_cmd_payload_extract_rpc(g, cmd, payload, seq);
-	else
+	} else {
 		err = pmu_cmd_payload_extract(g, cmd, payload, seq);
+	}
 
-	if (err)
+	if (err) {
 		goto clean_up;
+	}
 
 	seq->state = PMU_SEQ_STATE_USED;
 
 	err = pmu_write_cmd(pmu, cmd, queue_id, timeout);
-	if (err)
+	if (err) {
 		seq->state = PMU_SEQ_STATE_PENDING;
+	}
 
 	nvgpu_log_fn(g, "done");
 
@@ -516,18 +542,21 @@ static int pmu_response_handle(struct nvgpu_pmu *pmu,
 				pv->pmu_allocation_get_dmem_size(pmu,
 				pv->get_pmu_seq_out_a_ptr(seq)), 0);
 		}
-	} else
+	} else {
 		seq->callback = NULL;
+	}
 	if (pv->pmu_allocation_get_dmem_size(pmu,
-		pv->get_pmu_seq_in_a_ptr(seq)) != 0)
+		pv->get_pmu_seq_in_a_ptr(seq)) != 0) {
 		nvgpu_free(&pmu->dmem,
 			pv->pmu_allocation_get_dmem_offset(pmu,
 			pv->get_pmu_seq_in_a_ptr(seq)));
+	}
 	if (pv->pmu_allocation_get_dmem_size(pmu,
-		pv->get_pmu_seq_out_a_ptr(seq)) != 0)
+		pv->get_pmu_seq_out_a_ptr(seq)) != 0) {
 		nvgpu_free(&pmu->dmem,
 			pv->pmu_allocation_get_dmem_offset(pmu,
 			pv->get_pmu_seq_out_a_ptr(seq)));
+	}
 
 	if (seq->out_mem != NULL) {
 		memset(pv->pmu_allocation_get_fb_addr(pmu,
@@ -536,10 +565,11 @@ static int pmu_response_handle(struct nvgpu_pmu *pmu,
 			pv->get_pmu_seq_out_a_ptr(seq)));
 
 		nvgpu_pmu_surface_free(g, seq->out_mem);
-		if (seq->out_mem != seq->in_mem)
+		if (seq->out_mem != seq->in_mem) {
 			nvgpu_kfree(g, seq->out_mem);
-		else
+		} else {
 			seq->out_mem = NULL;
+		}
 	}
 
 	if (seq->in_mem != NULL) {
@@ -553,8 +583,9 @@ static int pmu_response_handle(struct nvgpu_pmu *pmu,
 		seq->in_mem = NULL;
 	}
 
-	if (seq->callback)
+	if (seq->callback) {
 		seq->callback(g, msg, seq->cb_params, seq->desc, ret);
+	}
 
 	pmu_seq_release(pmu, seq);
 
@@ -667,11 +698,13 @@ int nvgpu_pmu_process_message(struct nvgpu_pmu *pmu)
 
 	if (unlikely(!pmu->pmu_ready)) {
 		nvgpu_pmu_process_init_msg(pmu, &msg);
-		if (g->ops.pmu.init_wpr_region != NULL)
+		if (g->ops.pmu.init_wpr_region != NULL) {
 			g->ops.pmu.init_wpr_region(g);
+		}
 
-		if (nvgpu_is_enabled(g, NVGPU_PMU_PERFMON))
+		if (nvgpu_is_enabled(g, NVGPU_PMU_PERFMON)) {
 			g->ops.pmu.pmu_init_perfmon(pmu);
+		}
 
 		return 0;
 	}
@@ -687,10 +720,11 @@ int nvgpu_pmu_process_message(struct nvgpu_pmu *pmu)
 
 		msg.hdr.ctrl_flags &= ~PMU_CMD_FLAGS_PMU_MASK;
 
-		if (msg.hdr.ctrl_flags == PMU_CMD_FLAGS_EVENT)
+		if (msg.hdr.ctrl_flags == PMU_CMD_FLAGS_EVENT) {
 			pmu_handle_event(pmu, &msg);
-		else
+		} else {
 			pmu_response_handle(pmu, &msg);
+		}
 	}
 
 	return 0;
@@ -706,11 +740,13 @@ int pmu_wait_message_cond(struct nvgpu_pmu *pmu, u32 timeout_ms,
 	nvgpu_timeout_init(g, &timeout, timeout_ms, NVGPU_TIMER_CPU_TIMER);
 
 	do {
-		if (*(u8 *)var == val)
+		if (*(u8 *)var == val) {
 			return 0;
+		}
 
-		if (gk20a_pmu_is_interrupted(pmu))
+		if (gk20a_pmu_is_interrupted(pmu)) {
 			gk20a_pmu_isr(g);
+		}
 
 		nvgpu_usleep_range(delay, delay * 2);
 		delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
@@ -816,8 +852,9 @@ static void pmu_rpc_handler(struct gk20a *g, struct pmu_msg *msg,
 
 exit:
 	/* free allocated memory */
-	if (rpc_payload->is_mem_free_set)
+	if (rpc_payload->is_mem_free_set) {
 		nvgpu_kfree(g, rpc_payload);
+	}
 }
 
 int nvgpu_pmu_rpc_execute(struct nvgpu_pmu *pmu, struct nv_pmu_rpc_header *rpc,
@@ -914,8 +951,9 @@ int nvgpu_pmu_rpc_execute(struct nvgpu_pmu *pmu, struct nv_pmu_rpc_header *rpc,
 
 exit:
 	if (status) {
-		if (rpc_payload)
+		if (rpc_payload) {
 			nvgpu_kfree(g, rpc_payload);
+		}
 	}
 
 	return status;