author	Mahantesh Kumbar <mkumbar@nvidia.com>	2017-12-07 11:38:10 -0500
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2018-01-19 02:39:56 -0500
commit	a57258e9b18f2f336457165391572bc477371e94 (patch)
tree	1e5bde4c3a6ced447adfe0baeadb6e986ebfe819
parent	badfffe3ef3f5901a5ebd819d25310b3b17c31eb (diff)
gpu: nvgpu: RPC interface support

- Created nv_pmu_rpc_cmd & nv_pmu_rpc_msg structs & added member rpc
  under pmu_cmd & pmu_msg
- Created RPC header interface
- Created RPC desc struct & added it as a member of the PMU payload
- Defined PMU_RPC_EXECUTE() to convert the different RPC requests into
  a generic RPC call
- Added nvgpu_pmu_rpc_execute() to execute an RPC request by building
  the required RPC payload & sending the request to the PMU
- Added pmu_rpc_handler() as the default callback handler for an RPC
  when the caller does not provide one
- Modified nvgpu_pmu_rpc_execute() to validate the RPC payload
  parameters
- Modified nvgpu_pmu_cmd_post() to handle RPC payload requests

JIRA GPUT19X-137

Change-Id: Iac140eb6b98d6bae06a089e71c96f15068fe7e7b
Signed-off-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Signed-off-by: seshendra Gadagottu <sgadagottu@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1613266
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
GVS: Gerrit_Virtual_Submit
Tested-by: Deepak Goyal <dgoyal@nvidia.com>
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
-rw-r--r--	drivers/gpu/nvgpu/common/pmu/pmu_ipc.c			| 238
-rw-r--r--	drivers/gpu/nvgpu/include/nvgpu/pmu.h				|  25
-rw-r--r--	drivers/gpu/nvgpu/include/nvgpu/pmuif/gpmuif_cmn.h		|  18
-rw-r--r--	drivers/gpu/nvgpu/include/nvgpu/pmuif/nvgpu_gpmu_cmdif.h	|  43
4 files changed, 282 insertions(+), 42 deletions(-)
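
The flow introduced by this patch: a caller fills a unit-specific RPC structure whose first member is struct nv_pmu_rpc_header, then invokes PMU_RPC_EXECUTE(), which forwards to nvgpu_pmu_rpc_execute(); that in turn copies the RPC body into a PMU DMEM allocation and posts an NV_PMU_RPC_CMD_ID command. A minimal caller-side sketch follows; the PERFMON unit, the NV_PMU_RPC_ID_PERFMON_INIT function ID, and the struct layout are hypothetical placeholders, not part of this patch.

/*
 * Caller-side sketch (assumed names: the unit PERFMON, the function ID
 * NV_PMU_RPC_ID_PERFMON_INIT, and this struct layout are hypothetical;
 * only PMU_RPC_EXECUTE(), struct nv_pmu_rpc_header, and
 * nvgpu_pmu_rpc_execute() are introduced by this patch).
 */
struct nv_pmu_rpc_struct_perfmon_init {
	/* every RPC struct must lead with the common header */
	struct nv_pmu_rpc_header hdr;
	/* unit-specific parameters (hypothetical) */
	u32 sample_period_us;
	/* trailing scratch area, excluded from the copied RPC body */
	u32 scratch[1];
};

static int demo_rpc_call(struct nvgpu_pmu *pmu)
{
	struct nv_pmu_rpc_struct_perfmon_init rpc;
	int status = 0;

	memset(&rpc, 0, sizeof(rpc));
	rpc.sample_period_us = 1000;

	/*
	 * Fills rpc.hdr (unit_id = PMU_UNIT_PERFMON, function =
	 * NV_PMU_RPC_ID_PERFMON_INIT, flags = 0), then calls
	 * nvgpu_pmu_rpc_execute(pmu, &rpc.hdr,
	 *	sizeof(rpc) - sizeof(rpc.scratch), 0, NULL, NULL);
	 */
	PMU_RPC_EXECUTE(status, pmu, PERFMON, INIT, &rpc, 0);

	return status;
}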
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c b/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c
index 4c706e57..829fee19 100644
--- a/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c
+++ b/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c
@@ -410,11 +410,13 @@ static bool pmu_validate_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd,
 	if (payload == NULL)
 		return true;
 
-	if (payload->in.buf == NULL && payload->out.buf == NULL)
+	if (payload->in.buf == NULL && payload->out.buf == NULL &&
+		payload->rpc.prpc == NULL)
 		goto invalid_cmd;
 
 	if ((payload->in.buf != NULL && payload->in.size == 0) ||
-		(payload->out.buf != NULL && payload->out.size == 0))
+		(payload->out.buf != NULL && payload->out.size == 0) ||
+		(payload->rpc.prpc != NULL && payload->rpc.size_rpc == 0))
 		goto invalid_cmd;
 
 	in_size = PMU_CMD_HDR_SIZE;
@@ -491,55 +493,61 @@ clean_up:
 	return err;
 }
 
-int nvgpu_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd,
-		struct pmu_msg *msg, struct pmu_payload *payload,
-		u32 queue_id, pmu_callback callback, void *cb_param,
-		u32 *seq_desc, unsigned long timeout)
+static int pmu_cmd_payload_extract_rpc(struct gk20a *g, struct pmu_cmd *cmd,
+		struct pmu_payload *payload, struct pmu_sequence *seq)
 {
 	struct nvgpu_pmu *pmu = &g->pmu;
 	struct pmu_v *pv = &g->ops.pmu_ver;
-	struct pmu_sequence *seq;
-	void *in = NULL, *out = NULL;
-	int err;
+	u16 dmem_alloc_size = 0;
+	u32 dmem_alloc_offset = 0;
+	int err = 0;
 
 	nvgpu_log_fn(g, " ");
 
-	if ((!cmd) || (!seq_desc) || (!pmu->pmu_ready)) {
-		if (!cmd)
-			nvgpu_warn(g, "%s(): PMU cmd buffer is NULL", __func__);
-		else if (!seq_desc)
-			nvgpu_warn(g, "%s(): Seq descriptor is NULL", __func__);
-		else
-			nvgpu_warn(g, "%s(): PMU is not ready", __func__);
-
-		WARN_ON(1);
-		return -EINVAL;
+	dmem_alloc_size = payload->rpc.size_rpc +
+		payload->rpc.size_scratch;
+	dmem_alloc_offset = nvgpu_alloc(&pmu->dmem, dmem_alloc_size);
+	if (!dmem_alloc_offset) {
+		err = -ENOMEM;
+		goto clean_up;
 	}
 
-	if (!pmu_validate_cmd(pmu, cmd, msg, payload, queue_id))
-		return -EINVAL;
+	nvgpu_flcn_copy_to_dmem(pmu->flcn, dmem_alloc_offset,
+		payload->rpc.prpc, payload->rpc.size_rpc, 0);
 
-	err = pmu_seq_acquire(pmu, &seq);
+	cmd->cmd.rpc.rpc_dmem_size = payload->rpc.size_rpc;
+	cmd->cmd.rpc.rpc_dmem_ptr = dmem_alloc_offset;
+
+	seq->out_payload = payload->rpc.prpc;
+	pv->pmu_allocation_set_dmem_size(pmu,
+		pv->get_pmu_seq_out_a_ptr(seq),
+		payload->rpc.size_rpc);
+	pv->pmu_allocation_set_dmem_offset(pmu,
+		pv->get_pmu_seq_out_a_ptr(seq),
+		dmem_alloc_offset);
+
+clean_up:
 	if (err)
-		return err;
+		nvgpu_log_fn(g, "fail");
+	else
+		nvgpu_log_fn(g, "done");
 
-	cmd->hdr.seq_id = seq->id;
+	return err;
+}
 
-	cmd->hdr.ctrl_flags = 0;
-	cmd->hdr.ctrl_flags |= PMU_CMD_FLAGS_STATUS;
-	cmd->hdr.ctrl_flags |= PMU_CMD_FLAGS_INTR;
+static int pmu_cmd_payload_extract(struct gk20a *g, struct pmu_cmd *cmd,
+		struct pmu_payload *payload, struct pmu_sequence *seq)
+{
+	struct nvgpu_pmu *pmu = &g->pmu;
+	struct pmu_v *pv = &g->ops.pmu_ver;
+	void *in = NULL, *out = NULL;
+	int err = 0;
 
-	seq->callback = callback;
-	seq->cb_params = cb_param;
-	seq->msg = msg;
-	seq->out_payload = NULL;
-	seq->desc = pmu->next_seq_desc++;
+	nvgpu_log_fn(g, " ");
 
 	if (payload)
 		seq->out_payload = payload->out.buf;
 
-	*seq_desc = seq->desc;
-
 	if (payload && payload->in.offset != 0) {
 		pv->set_pmu_allocation_ptr(pmu, &in,
 			((u8 *)&cmd->cmd + payload->in.offset));
@@ -553,7 +561,7 @@ int nvgpu_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd,
 
 		*(pv->pmu_allocation_get_dmem_offset_addr(pmu, in)) =
 			nvgpu_alloc(&pmu->dmem,
-			pv->pmu_allocation_get_dmem_size(pmu, in));
+				pv->pmu_allocation_get_dmem_size(pmu, in));
 		if (!*(pv->pmu_allocation_get_dmem_offset_addr(pmu, in)))
 			goto clean_up;
 
@@ -596,7 +604,8 @@ int nvgpu_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd,
 		if (payload->in.buf != payload->out.buf) {
 			*(pv->pmu_allocation_get_dmem_offset_addr(pmu, out)) =
 				nvgpu_alloc(&pmu->dmem,
-				pv->pmu_allocation_get_dmem_size(pmu, out));
+					pv->pmu_allocation_get_dmem_size(pmu,
+						out));
 			if (!*(pv->pmu_allocation_get_dmem_offset_addr(pmu,
 				out)))
 				goto clean_up;
@@ -630,7 +639,72 @@ int nvgpu_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd,
 
 	}
 
+clean_up:
+	if (err) {
+		nvgpu_log_fn(g, "fail");
+		if (in)
+			nvgpu_free(&pmu->dmem,
+				pv->pmu_allocation_get_dmem_offset(pmu, in));
+		if (out)
+			nvgpu_free(&pmu->dmem,
+				pv->pmu_allocation_get_dmem_offset(pmu, out));
+	} else
+		nvgpu_log_fn(g, "done");
+
+	return err;
+}
+
+int nvgpu_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd,
+		struct pmu_msg *msg, struct pmu_payload *payload,
+		u32 queue_id, pmu_callback callback, void *cb_param,
+		u32 *seq_desc, unsigned long timeout)
+{
+	struct nvgpu_pmu *pmu = &g->pmu;
+	struct pmu_sequence *seq;
+	int err;
+
+	nvgpu_log_fn(g, " ");
+
+	if ((!cmd) || (!seq_desc) || (!pmu->pmu_ready)) {
+		if (!cmd)
+			nvgpu_warn(g, "%s(): PMU cmd buffer is NULL", __func__);
+		else if (!seq_desc)
+			nvgpu_warn(g, "%s(): Seq descriptor is NULL", __func__);
+		else
+			nvgpu_warn(g, "%s(): PMU is not ready", __func__);
+
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	if (!pmu_validate_cmd(pmu, cmd, msg, payload, queue_id))
+		return -EINVAL;
 
+	err = pmu_seq_acquire(pmu, &seq);
+	if (err)
+		return err;
+
+	cmd->hdr.seq_id = seq->id;
+
+	cmd->hdr.ctrl_flags = 0;
+	cmd->hdr.ctrl_flags |= PMU_CMD_FLAGS_STATUS;
+	cmd->hdr.ctrl_flags |= PMU_CMD_FLAGS_INTR;
+
+	seq->callback = callback;
+	seq->cb_params = cb_param;
+	seq->msg = msg;
+	seq->out_payload = NULL;
+	seq->desc = pmu->next_seq_desc++;
+
+	*seq_desc = seq->desc;
+
+	if (cmd->cmd.rpc.cmd_type == NV_PMU_RPC_CMD_ID)
+		err = pmu_cmd_payload_extract_rpc(g, cmd, payload, seq);
+	else
+		err = pmu_cmd_payload_extract(g, cmd, payload, seq);
+
+	if (err)
+		goto clean_up;
 
 	seq->state = PMU_SEQ_STATE_USED;
 
@@ -644,12 +718,6 @@ int nvgpu_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd,
 
 clean_up:
 	nvgpu_log_fn(g, "fail");
-	if (in)
-		nvgpu_free(&pmu->dmem,
-			pv->pmu_allocation_get_dmem_offset(pmu, in));
-	if (out)
-		nvgpu_free(&pmu->dmem,
-			pv->pmu_allocation_get_dmem_offset(pmu, out));
 
 	pmu_seq_release(pmu, seq);
 	return err;
@@ -666,6 +734,7 @@ static int pmu_response_handle(struct nvgpu_pmu *pmu,
 	nvgpu_log_fn(g, " ");
 
 	seq = &pmu->seq[msg->hdr.seq_id];
+
 	if (seq->state != PMU_SEQ_STATE_USED &&
 	    seq->state != PMU_SEQ_STATE_CANCELLED) {
 		nvgpu_err(g, "msg for an unknown sequence %d", seq->id);
@@ -905,3 +974,88 @@ int pmu_wait_message_cond(struct nvgpu_pmu *pmu, u32 timeout_ms,
 	return -ETIMEDOUT;
 }
 
+static void pmu_rpc_handler(struct gk20a *g, struct pmu_msg *msg,
+	void *param, u32 handle, u32 status)
+{
+	struct nv_pmu_rpc_header rpc;
+
+	memset(&rpc, 0, sizeof(struct nv_pmu_rpc_header));
+	if (param)
+		memcpy(&rpc, param, sizeof(struct nv_pmu_rpc_header));
+
+	if (rpc.flcn_status) {
+		nvgpu_err(g, "failed RPC response, status=0x%x, func=0x%x",
+			rpc.flcn_status, rpc.function);
+		goto exit;
+	}
+
+	switch (msg->hdr.unit_id) {
+	/* TBD: unit-specific cases will be added */
+	default:
+		nvgpu_err(g, "invalid RPC response, status=0x%x",
+			rpc.flcn_status);
+		break;
+	}
+
+exit:
+	/* free allocated memory */
+	if (param)
+		nvgpu_kfree(g, param);
+}
+
+int nvgpu_pmu_rpc_execute(struct nvgpu_pmu *pmu, struct nv_pmu_rpc_header *rpc,
+	u16 size_rpc, u16 size_scratch, pmu_callback caller_cb,
+	void *caller_cb_param)
+{
+	struct gk20a *g = pmu->g;
+	struct pmu_cmd cmd;
+	struct pmu_payload payload;
+	pmu_callback callback = caller_cb;
+	void *rpc_buff = NULL;
+	void *cb_param = caller_cb_param;
+	u32 seq = 0;
+	int status = 0;
+
+	if (!pmu->pmu_ready) {
+		nvgpu_warn(g, "PMU is not ready to process RPC");
+		return -EINVAL;
+	}
+
+	rpc_buff = nvgpu_kzalloc(g, size_rpc);
+	if (!rpc_buff)
+		return -ENOMEM;
+
+	memset(&cmd, 0, sizeof(struct pmu_cmd));
+	memset(&payload, 0, sizeof(struct pmu_payload));
+
+	cmd.hdr.unit_id = rpc->unit_id;
+	cmd.hdr.size = PMU_CMD_HDR_SIZE + sizeof(struct nv_pmu_rpc_cmd);
+	cmd.cmd.rpc.cmd_type = NV_PMU_RPC_CMD_ID;
+	cmd.cmd.rpc.flags = rpc->flags;
+
+	memcpy(rpc_buff, rpc, size_rpc);
+	payload.rpc.prpc = rpc_buff;
+	payload.rpc.size_rpc = size_rpc;
+	payload.rpc.size_scratch = size_scratch;
+
+	/* assign default RPC handler & buffer if the caller provided none */
+	if (!callback && !cb_param) {
+		callback = pmu_rpc_handler;
+		cb_param = rpc_buff;
+	}
+
+	status = nvgpu_pmu_cmd_post(g, &cmd, NULL, &payload,
+			PMU_COMMAND_QUEUE_LPQ, callback,
+			cb_param, &seq, ~0);
+	if (status) {
+		nvgpu_err(g, "Failed to execute RPC status=0x%x, func=0x%x",
+			status, rpc->function);
+	}
+
+	/* if the caller passed its own buffer, free the allocated RPC buffer */
+	if (caller_cb_param)
+		nvgpu_kfree(g, rpc_buff);
+
+	return status;
+}
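
One point worth noting in nvgpu_pmu_rpc_execute() above is buffer ownership: when the caller supplies no callback, pmu_rpc_handler() becomes the callback and frees the internally allocated copy of the RPC on completion; when the caller does supply a callback parameter, the internal copy is freed before returning, so the caller's parameter must stay valid on its own. A sketch of an asynchronous caller, assuming a hypothetical demo_done() handler (not part of this patch):

static void demo_done(struct gk20a *g, struct pmu_msg *msg,
		void *param, u32 handle, u32 status)
{
	/* param is whatever context the caller passed below */
	if (status)
		nvgpu_err(g, "demo RPC failed: 0x%x", status);
}

static int demo_rpc_async(struct nvgpu_pmu *pmu,
		struct nv_pmu_rpc_header *rpc, u16 size_rpc, void *ctx)
{
	/* size_scratch = 0: no extra DMEM scratch beyond the RPC body */
	return nvgpu_pmu_rpc_execute(pmu, rpc, size_rpc, 0,
			demo_done, ctx);
}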
diff --git a/drivers/gpu/nvgpu/include/nvgpu/pmu.h b/drivers/gpu/nvgpu/include/nvgpu/pmu.h
index c0ceca61..cd7e1879 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/pmu.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/pmu.h
@@ -143,9 +143,29 @@ enum {
 #define APCTRL_POWER_BREAKEVEN_DEFAULT_US (2000)
 #define APCTRL_CYCLES_PER_SAMPLE_MAX_DEFAULT (200)
 
+/* RPC */
+#define PMU_RPC_EXECUTE(_stat, _pmu, _unit, _func, _prpc, _size)	\
+	do {								\
+		memset(&((_prpc)->hdr), 0, sizeof((_prpc)->hdr));	\
+									\
+		(_prpc)->hdr.unit_id = PMU_UNIT_##_unit;		\
+		(_prpc)->hdr.function = NV_PMU_RPC_ID_##_unit##_##_func;\
+		(_prpc)->hdr.flags = 0x0;				\
+									\
+		_stat = nvgpu_pmu_rpc_execute(_pmu, &((_prpc)->hdr),	\
+			(sizeof(*(_prpc)) - sizeof((_prpc)->scratch)),	\
+			(_size), NULL, NULL);				\
+	} while (0)
+
 typedef void (*pmu_callback)(struct gk20a *, struct pmu_msg *, void *, u32,
 	u32);
 
+struct pmu_rpc_desc {
+	void *prpc;
+	u16 size_rpc;
+	u16 size_scratch;
+};
+
 struct pmu_payload {
 	struct {
 		void *buf;
@@ -153,6 +173,7 @@ struct pmu_payload {
 		u32 size;
 		u32 fb_size;
 	} in, out;
+	struct pmu_rpc_desc rpc;
 };
 
 struct pmu_ucode_desc {
@@ -472,4 +493,8 @@ void nvgpu_pmu_dump_falcon_stats(struct nvgpu_pmu *pmu);
 void nvgpu_pmu_dump_elpg_stats(struct nvgpu_pmu *pmu);
 bool nvgpu_find_hex_in_string(char *strings, struct gk20a *g, u32 *hex_pos);
 
+/* PMU RPC */
+int nvgpu_pmu_rpc_execute(struct nvgpu_pmu *pmu, struct nv_pmu_rpc_header *rpc,
+	u16 size_rpc, u16 size_scratch, pmu_callback callback, void *cb_param);
+
 #endif /* __NVGPU_PMU_H__ */
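
The size arithmetic inside PMU_RPC_EXECUTE() is worth spelling out: sizeof(*(_prpc)) - sizeof((_prpc)->scratch) sends only the fixed RPC body to the PMU, while the _size argument reserves additional DMEM scratch on top of it (pmu_cmd_payload_extract_rpc() allocates size_rpc + size_scratch). A worked example with a hypothetical RPC struct, not part of this patch:

struct nv_pmu_rpc_struct_demo {
	struct nv_pmu_rpc_header hdr;	/* 12 bytes */
	u32 param;			/*  4 bytes */
	u32 scratch[4];			/* 16 bytes, never copied */
};

/*
 * size_rpc passed to nvgpu_pmu_rpc_execute() here is
 * sizeof(struct nv_pmu_rpc_struct_demo) - sizeof(scratch)
 * = 32 - 16 = 16 bytes; the DMEM allocation is 16 + _size.
 */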
diff --git a/drivers/gpu/nvgpu/include/nvgpu/pmuif/gpmuif_cmn.h b/drivers/gpu/nvgpu/include/nvgpu/pmuif/gpmuif_cmn.h
index f39e7b6c..2284289e 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/pmuif/gpmuif_cmn.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/pmuif/gpmuif_cmn.h
@@ -127,4 +127,22 @@ union name##_aligned { \
 		(PMU_FB_COPY_RW_ALIGNMENT))]; \
 }
 
+/* RPC (Remote Procedure Call) header structure */
+#define NV_PMU_RPC_FLAGS_TYPE_SYNC 0x00000000
+
+struct nv_pmu_rpc_header {
+	/* Identifies the unit servicing the requested RPC */
+	u8 unit_id;
+	/* Identifies the requested RPC (within the unit) */
+	u8 function;
+	/* RPC call flags (@see PMU_RPC_FLAGS) */
+	u8 flags;
+	/* Falcon's status code to describe failures */
+	u8 flcn_status;
+	/* RPC's total exec. time (measured on nvgpu driver side) */
+	u32 exec_time_nv_ns;
+	/* RPC's actual exec. time (measured on PMU side) */
+	u32 exec_time_pmu_ns;
+};
+
 #endif /* _GPMUIFCMN_H_ */
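
Since this header is shared with the PMU falcon, the driver-side struct must match the firmware's 12-byte layout: four u8 fields followed by two u32s, with no compiler padding. A compile-time check along these lines could guard that assumption (a sketch, not part of this patch):

#include <linux/kernel.h>

static inline void nv_pmu_rpc_header_layout_check(void)
{
	/* 4 x u8 + 2 x u32 = 12 bytes; any padding would desynchronize
	 * the driver from the PMU firmware's view of the header. */
	BUILD_BUG_ON(sizeof(struct nv_pmu_rpc_header) != 12);
}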
diff --git a/drivers/gpu/nvgpu/include/nvgpu/pmuif/nvgpu_gpmu_cmdif.h b/drivers/gpu/nvgpu/include/nvgpu/pmuif/nvgpu_gpmu_cmdif.h
index fea6326a..208644d7 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/pmuif/nvgpu_gpmu_cmdif.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/pmuif/nvgpu_gpmu_cmdif.h
@@ -39,6 +39,47 @@
 #include "gpmuifthermsensor.h"
 #include "gpmuifseq.h"
 
+/*
+ * Command requesting execution of the RPC (Remote Procedure Call)
+ */
+struct nv_pmu_rpc_cmd {
+	/* Must be set to @ref NV_PMU_RPC_CMD_ID */
+	u8 cmd_type;
+	/* RPC call flags (@see PMU_RPC_FLAGS) */
+	u8 flags;
+	/*
+	 * Size of RPC structure allocated
+	 * within NV managed DMEM heap.
+	 */
+	u16 rpc_dmem_size;
+	/*
+	 * DMEM pointer of RPC structure allocated
+	 * within NV managed DMEM heap.
+	 */
+	u32 rpc_dmem_ptr;
+};
+
+#define NV_PMU_RPC_CMD_ID 0x80
+
+/* Message carrying the result of the RPC execution */
+struct nv_pmu_rpc_msg {
+	/* Must be set to @ref NV_PMU_RPC_MSG_ID */
+	u8 msg_type;
+	/* RPC call flags (@see PMU_RPC_FLAGS) */
+	u8 flags;
+	/*
+	 * Size of RPC structure allocated
+	 * within NV managed DMEM heap.
+	 */
+	u16 rpc_dmem_size;
+	/*
+	 * DMEM pointer of RPC structure allocated
+	 * within NV managed DMEM heap.
+	 */
+	u32 rpc_dmem_ptr;
+};
+
+#define NV_PMU_RPC_MSG_ID 0x80
+
 struct pmu_cmd {
 	struct pmu_hdr hdr;
 	union {
@@ -52,6 +93,7 @@ struct pmu_cmd {
 		struct nv_pmu_clk_cmd clk;
 		struct nv_pmu_pmgr_cmd pmgr;
 		struct nv_pmu_therm_cmd therm;
+		struct nv_pmu_rpc_cmd rpc;
 	} cmd;
 };
 
@@ -69,6 +111,7 @@ struct pmu_msg {
 		struct nv_pmu_clk_msg clk;
 		struct nv_pmu_pmgr_msg pmgr;
 		struct nv_pmu_therm_msg therm;
+		struct nv_pmu_rpc_msg rpc;
 	} msg;
 };
 
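
The rpc members slot into the existing cmd/msg unions because every union member begins with a one-byte type field, so nvgpu_pmu_cmd_post() can test cmd->cmd.rpc.cmd_type regardless of which member the caller filled in. A small sketch of that discrimination (the helper name is ours; the check itself mirrors the pmu_ipc.c hunk above):

/*
 * Returns true when a posted command should take the RPC path.
 * NV_PMU_RPC_CMD_ID (0x80) is presumably chosen above the per-unit
 * command IDs so the two ID spaces cannot collide.
 */
static inline bool pmu_cmd_is_rpc(const struct pmu_cmd *cmd)
{
	return cmd->cmd.rpc.cmd_type == NV_PMU_RPC_CMD_ID;
}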