path: root/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c
author    Mahantesh Kumbar <mkumbar@nvidia.com>  2017-12-07 11:38:10 -0500
committer mobile promotions <svcmobile_promotions@nvidia.com>  2018-01-19 02:39:56 -0500
commit    a57258e9b18f2f336457165391572bc477371e94 (patch)
tree      1e5bde4c3a6ced447adfe0baeadb6e986ebfe819 /drivers/gpu/nvgpu/common/pmu/pmu_ipc.c
parent    badfffe3ef3f5901a5ebd819d25310b3b17c31eb (diff)
gpu: nvgpu: RPC interface support
- Created nv_pmu_rpc_cmd & nv_pmu_rpc_msg structs, and added an rpc member
  under pmu_cmd & pmu_msg.
- Created the RPC header interface.
- Created an RPC desc struct and added it as a member of the PMU payload.
- Defined PMU_RPC_EXECUTE() to convert the different RPC requests into a
  generic RPC call.
- Added nvgpu_pmu_rpc_execute() to execute an RPC request by building the
  required RPC payload and sending the request to the PMU for execution.
- Made nvgpu_pmu_rpc_execute() install a default callback handler for the
  RPC when the caller does not provide one.
- Modified nvgpu_pmu_rpc_execute() to include a check of the RPC payload
  parameters.
- Modified nvgpu_pmu_cmd_post() to handle RPC payload requests.

JIRA GPUT19X-137

Change-Id: Iac140eb6b98d6bae06a089e71c96f15068fe7e7b
Signed-off-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Signed-off-by: seshendra Gadagottu <sgadagottu@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1613266
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
GVS: Gerrit_Virtual_Submit
Tested-by: Deepak Goyal <dgoyal@nvidia.com>
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
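For orientation, here is a minimal caller-side sketch of the new interface. It is illustrative only and not part of this change: struct my_unit_rpc, MY_PMU_UNIT_ID and MY_RPC_FUNC_QUERY are hypothetical placeholders for a real unit's RPC structure and IDs, and the usual nvgpu PMU headers are assumed; the only facts taken from this patch are the nvgpu_pmu_rpc_execute() signature and the nv_pmu_rpc_header fields it touches (unit_id, function, flags, flcn_status).

/* Hypothetical unit-specific RPC: the header must come first, since
 * nvgpu_pmu_rpc_execute() copies size_rpc bytes starting at the header. */
struct my_unit_rpc {
	struct nv_pmu_rpc_header hdr;
	u32 query_data;			/* unit-specific request field */
};

static int my_unit_send_query(struct nvgpu_pmu *pmu, u32 data)
{
	struct my_unit_rpc rpc;

	memset(&rpc, 0, sizeof(rpc));
	rpc.hdr.unit_id = MY_PMU_UNIT_ID;	/* hypothetical unit ID */
	rpc.hdr.function = MY_RPC_FUNC_QUERY;	/* hypothetical function ID */
	rpc.query_data = data;

	/*
	 * With no caller callback/param, nvgpu_pmu_rpc_execute() copies the
	 * request into an internal buffer, posts it on PMU_COMMAND_QUEUE_LPQ,
	 * and installs pmu_rpc_handler as the completion handler, which
	 * checks flcn_status and frees that buffer.
	 */
	return nvgpu_pmu_rpc_execute(pmu, &rpc.hdr, (u16)sizeof(rpc), 0,
			NULL, NULL);
}

The PMU_RPC_EXECUTE() macro mentioned above presumably wraps this same per-unit pattern; its definition lives outside this file.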
Diffstat (limited to 'drivers/gpu/nvgpu/common/pmu/pmu_ipc.c')
-rw-r--r--  drivers/gpu/nvgpu/common/pmu/pmu_ipc.c  238
1 file changed, 196 insertions(+), 42 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c b/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c
index 4c706e57..829fee19 100644
--- a/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c
+++ b/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c
@@ -410,11 +410,13 @@ static bool pmu_validate_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd,
 	if (payload == NULL)
 		return true;
 
-	if (payload->in.buf == NULL && payload->out.buf == NULL)
+	if (payload->in.buf == NULL && payload->out.buf == NULL &&
+		payload->rpc.prpc == NULL)
 		goto invalid_cmd;
 
 	if ((payload->in.buf != NULL && payload->in.size == 0) ||
-		(payload->out.buf != NULL && payload->out.size == 0))
+		(payload->out.buf != NULL && payload->out.size == 0) ||
+		(payload->rpc.prpc != NULL && payload->rpc.size_rpc == 0))
 		goto invalid_cmd;
 
 	in_size = PMU_CMD_HDR_SIZE;
@@ -491,55 +493,61 @@ clean_up:
 	return err;
 }
 
-int nvgpu_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd,
-	struct pmu_msg *msg, struct pmu_payload *payload,
-	u32 queue_id, pmu_callback callback, void *cb_param,
-	u32 *seq_desc, unsigned long timeout)
+static int pmu_cmd_payload_extract_rpc(struct gk20a *g, struct pmu_cmd *cmd,
+	struct pmu_payload *payload, struct pmu_sequence *seq)
 {
 	struct nvgpu_pmu *pmu = &g->pmu;
 	struct pmu_v *pv = &g->ops.pmu_ver;
-	struct pmu_sequence *seq;
-	void *in = NULL, *out = NULL;
-	int err;
+	u16 dmem_alloc_size = 0;
+	u32 dmem_alloc_offset = 0;
+	int err = 0;
 
 	nvgpu_log_fn(g, " ");
 
-	if ((!cmd) || (!seq_desc) || (!pmu->pmu_ready)) {
-		if (!cmd)
-			nvgpu_warn(g, "%s(): PMU cmd buffer is NULL", __func__);
-		else if (!seq_desc)
-			nvgpu_warn(g, "%s(): Seq descriptor is NULL", __func__);
-		else
-			nvgpu_warn(g, "%s(): PMU is not ready", __func__);
-
-		WARN_ON(1);
-		return -EINVAL;
+	dmem_alloc_size = payload->rpc.size_rpc +
+		payload->rpc.size_scratch;
+	dmem_alloc_offset = nvgpu_alloc(&pmu->dmem, dmem_alloc_size);
+	if (!dmem_alloc_offset) {
+		err = -ENOMEM;
+		goto clean_up;
 	}
 
-	if (!pmu_validate_cmd(pmu, cmd, msg, payload, queue_id))
-		return -EINVAL;
+	nvgpu_flcn_copy_to_dmem(pmu->flcn, dmem_alloc_offset,
+		payload->rpc.prpc, payload->rpc.size_rpc, 0);
 
-	err = pmu_seq_acquire(pmu, &seq);
+	cmd->cmd.rpc.rpc_dmem_size = payload->rpc.size_rpc;
+	cmd->cmd.rpc.rpc_dmem_ptr = dmem_alloc_offset;
+
+	seq->out_payload = payload->rpc.prpc;
+	pv->pmu_allocation_set_dmem_size(pmu,
+		pv->get_pmu_seq_out_a_ptr(seq),
+		payload->rpc.size_rpc);
+	pv->pmu_allocation_set_dmem_offset(pmu,
+		pv->get_pmu_seq_out_a_ptr(seq),
+		dmem_alloc_offset);
+
+clean_up:
 	if (err)
-		return err;
+		nvgpu_log_fn(g, "fail");
+	else
+		nvgpu_log_fn(g, "done");
 
-	cmd->hdr.seq_id = seq->id;
+	return err;
+}
 
-	cmd->hdr.ctrl_flags = 0;
-	cmd->hdr.ctrl_flags |= PMU_CMD_FLAGS_STATUS;
-	cmd->hdr.ctrl_flags |= PMU_CMD_FLAGS_INTR;
+static int pmu_cmd_payload_extract(struct gk20a *g, struct pmu_cmd *cmd,
+	struct pmu_payload *payload, struct pmu_sequence *seq)
+{
+	struct nvgpu_pmu *pmu = &g->pmu;
+	struct pmu_v *pv = &g->ops.pmu_ver;
+	void *in = NULL, *out = NULL;
+	int err = 0;
 
-	seq->callback = callback;
-	seq->cb_params = cb_param;
-	seq->msg = msg;
-	seq->out_payload = NULL;
-	seq->desc = pmu->next_seq_desc++;
+	nvgpu_log_fn(g, " ");
 
 	if (payload)
 		seq->out_payload = payload->out.buf;
 
-	*seq_desc = seq->desc;
-
 	if (payload && payload->in.offset != 0) {
 		pv->set_pmu_allocation_ptr(pmu, &in,
 			((u8 *)&cmd->cmd + payload->in.offset));
@@ -553,7 +561,7 @@ int nvgpu_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd,
 
 		*(pv->pmu_allocation_get_dmem_offset_addr(pmu, in)) =
 			nvgpu_alloc(&pmu->dmem,
-			pv->pmu_allocation_get_dmem_size(pmu, in));
+				pv->pmu_allocation_get_dmem_size(pmu, in));
 		if (!*(pv->pmu_allocation_get_dmem_offset_addr(pmu, in)))
 			goto clean_up;
 
@@ -596,7 +604,8 @@ int nvgpu_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd,
 		if (payload->in.buf != payload->out.buf) {
 			*(pv->pmu_allocation_get_dmem_offset_addr(pmu, out)) =
 				nvgpu_alloc(&pmu->dmem,
-				pv->pmu_allocation_get_dmem_size(pmu, out));
+					pv->pmu_allocation_get_dmem_size(pmu,
+					out));
 			if (!*(pv->pmu_allocation_get_dmem_offset_addr(pmu,
 				out)))
 				goto clean_up;
@@ -630,7 +639,72 @@ int nvgpu_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd,
 
 	}
 
+clean_up:
+	if (err) {
+		nvgpu_log_fn(g, "fail");
+		if (in)
+			nvgpu_free(&pmu->dmem,
+				pv->pmu_allocation_get_dmem_offset(pmu, in));
+		if (out)
+			nvgpu_free(&pmu->dmem,
+				pv->pmu_allocation_get_dmem_offset(pmu, out));
+	} else
+		nvgpu_log_fn(g, "done");
+
+	return err;
+}
+
+int nvgpu_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd,
+	struct pmu_msg *msg, struct pmu_payload *payload,
+	u32 queue_id, pmu_callback callback, void *cb_param,
+	u32 *seq_desc, unsigned long timeout)
+{
+	struct nvgpu_pmu *pmu = &g->pmu;
+	struct pmu_sequence *seq;
+	int err;
+
+	nvgpu_log_fn(g, " ");
+
+	if ((!cmd) || (!seq_desc) || (!pmu->pmu_ready)) {
+		if (!cmd)
+			nvgpu_warn(g, "%s(): PMU cmd buffer is NULL", __func__);
+		else if (!seq_desc)
+			nvgpu_warn(g, "%s(): Seq descriptor is NULL", __func__);
+		else
+			nvgpu_warn(g, "%s(): PMU is not ready", __func__);
+
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	if (!pmu_validate_cmd(pmu, cmd, msg, payload, queue_id))
+		return -EINVAL;
 
+	err = pmu_seq_acquire(pmu, &seq);
+	if (err)
+		return err;
+
+	cmd->hdr.seq_id = seq->id;
+
+	cmd->hdr.ctrl_flags = 0;
+	cmd->hdr.ctrl_flags |= PMU_CMD_FLAGS_STATUS;
+	cmd->hdr.ctrl_flags |= PMU_CMD_FLAGS_INTR;
+
+	seq->callback = callback;
+	seq->cb_params = cb_param;
+	seq->msg = msg;
+	seq->out_payload = NULL;
+	seq->desc = pmu->next_seq_desc++;
+
+	*seq_desc = seq->desc;
+
+	if (cmd->cmd.rpc.cmd_type == NV_PMU_RPC_CMD_ID)
+		err = pmu_cmd_payload_extract_rpc(g, cmd, payload, seq);
+	else
+		err = pmu_cmd_payload_extract(g, cmd, payload, seq);
+
+	if (err)
+		goto clean_up;
 
 	seq->state = PMU_SEQ_STATE_USED;
 
@@ -644,12 +718,6 @@ int nvgpu_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd,
 
 clean_up:
 	nvgpu_log_fn(g, "fail");
-	if (in)
-		nvgpu_free(&pmu->dmem,
-			pv->pmu_allocation_get_dmem_offset(pmu, in));
-	if (out)
-		nvgpu_free(&pmu->dmem,
-			pv->pmu_allocation_get_dmem_offset(pmu, out));
 
 	pmu_seq_release(pmu, seq);
 	return err;
@@ -666,6 +734,7 @@ static int pmu_response_handle(struct nvgpu_pmu *pmu,
 	nvgpu_log_fn(g, " ");
 
 	seq = &pmu->seq[msg->hdr.seq_id];
+
 	if (seq->state != PMU_SEQ_STATE_USED &&
 		seq->state != PMU_SEQ_STATE_CANCELLED) {
 		nvgpu_err(g, "msg for an unknown sequence %d", seq->id);
@@ -905,3 +974,88 @@ int pmu_wait_message_cond(struct nvgpu_pmu *pmu, u32 timeout_ms,
 	return -ETIMEDOUT;
 }
 
+static void pmu_rpc_handler(struct gk20a *g, struct pmu_msg *msg,
+	void *param, u32 handle, u32 status)
+{
+	struct nv_pmu_rpc_header rpc;
+
+	memset(&rpc, 0, sizeof(struct nv_pmu_rpc_header));
+	if (param)
+		memcpy(&rpc, param, sizeof(struct nv_pmu_rpc_header));
+
+	if (rpc.flcn_status) {
+		nvgpu_err(g, " failed RPC response, status=0x%x, func=0x%x",
+			rpc.flcn_status, rpc.function);
+		goto exit;
+	}
+
+	switch (msg->hdr.unit_id) {
+	/* TBD case will be added */
+	default:
+		nvgpu_err(g, " Invalid RPC response, stats 0x%x",
+			rpc.flcn_status);
+		break;
+	}
+
+exit:
+	/* free allocated memory */
+	if (param)
+		nvgpu_kfree(g, param);
+}
+
+int nvgpu_pmu_rpc_execute(struct nvgpu_pmu *pmu, struct nv_pmu_rpc_header *rpc,
+	u16 size_rpc, u16 size_scratch, pmu_callback caller_cb,
+	void *caller_cb_param)
+{
+	struct gk20a *g = pmu->g;
+	struct pmu_cmd cmd;
+	struct pmu_payload payload;
+	pmu_callback callback = caller_cb;
+	void *rpc_buff = NULL;
+	void *cb_param = caller_cb_param;
+	u32 seq = 0;
+	int status = 0;
+
+	if (!pmu->pmu_ready) {
+		nvgpu_warn(g, "PMU is not ready to process RPC");
+		return -EINVAL;
+	}
+
+	rpc_buff = nvgpu_kzalloc(g, size_rpc);
+	if (!rpc_buff)
+		return -ENOMEM;
+
+	memset(&cmd, 0, sizeof(struct pmu_cmd));
+	memset(&payload, 0, sizeof(struct pmu_payload));
+
+	cmd.hdr.unit_id = rpc->unit_id;
+	cmd.hdr.size = PMU_CMD_HDR_SIZE + sizeof(struct nv_pmu_rpc_cmd);
+	cmd.cmd.rpc.cmd_type = NV_PMU_RPC_CMD_ID;
+	cmd.cmd.rpc.flags = rpc->flags;
+
+	memcpy(rpc_buff, rpc, size_rpc);
+	payload.rpc.prpc = rpc_buff;
+	payload.rpc.size_rpc = size_rpc;
+	payload.rpc.size_scratch = size_scratch;
+
+	/* assign default RPC handler & buffer */
+	if (!callback && !cb_param) {
+		callback = pmu_rpc_handler;
+		cb_param = rpc_buff;
+	}
+
+	status = nvgpu_pmu_cmd_post(g, &cmd, NULL, &payload,
+			PMU_COMMAND_QUEUE_LPQ, pmu_rpc_handler,
+			cb_param, &seq, ~0);
+	if (status) {
+		nvgpu_err(g, "Failed to execute RPC status=0x%x, func=0x%x",
+			status, rpc->function);
+	}
+
+	/* if caller passed buff then free allocated RPC buffer */
+	if (caller_cb_param)
+		nvgpu_kfree(g, rpc_buff);
+
+	return status;
+
+}
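
A closing note on the DMEM bookkeeping in pmu_cmd_payload_extract_rpc() above: the allocation covers size_rpc + size_scratch bytes, but only the first size_rpc bytes are copied in, and the sequence's out allocation is also sized to size_rpc, so the reply is read back into the internally allocated request copy rather than the caller's struct. The sketch below restates that with hypothetical names (struct my_unit_rpc_info, MY_PMU_UNIT_ID, MY_RPC_FUNC_GET_INFO, MY_RPC_SCRATCH_SIZE); it is an assumption-laden illustration, not nvgpu API.

/* Hypothetical request; the header must come first. */
struct my_unit_rpc_info {
	struct nv_pmu_rpc_header hdr;
	u32 requested_id;		/* request parameter, copied to DMEM */
};

/* Hypothetical amount of extra DMEM the PMU may use while servicing the RPC. */
#define MY_RPC_SCRATCH_SIZE	0x20U

static int my_unit_get_info(struct nvgpu_pmu *pmu, u32 id)
{
	struct my_unit_rpc_info rpc;

	memset(&rpc, 0, sizeof(rpc));
	rpc.hdr.unit_id = MY_PMU_UNIT_ID;		/* hypothetical */
	rpc.hdr.function = MY_RPC_FUNC_GET_INFO;	/* hypothetical */
	rpc.requested_id = id;

	/*
	 * pmu_cmd_payload_extract_rpc() will allocate
	 * sizeof(rpc) + MY_RPC_SCRATCH_SIZE bytes of DMEM, copy in only
	 * sizeof(rpc) bytes, and point cmd.cmd.rpc at that allocation; the
	 * scratch tail is left for the PMU to use.
	 */
	return nvgpu_pmu_rpc_execute(pmu, &rpc.hdr, (u16)sizeof(rpc),
			MY_RPC_SCRATCH_SIZE, NULL, NULL);
}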