summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/perf/perf.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/nvgpu/perf/perf.c')
-rw-r--r--drivers/gpu/nvgpu/perf/perf.c118
1 files changed, 118 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/perf/perf.c b/drivers/gpu/nvgpu/perf/perf.c
new file mode 100644
index 00000000..41ebb315
--- /dev/null
+++ b/drivers/gpu/nvgpu/perf/perf.c
@@ -0,0 +1,118 @@
1/*
2 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 */
13
#include <linux/string.h>

#include "gk20a/gk20a.h"
#include "perf.h"
#include "pmuif/gpmuifperf.h"
#include "pmuif/gpmuifperfvfe.h"
#include "gk20a/pmu_gk20a.h"
#include "clk/clk_arb.h"
20
/*
 * Context handed to perfrpc_pmucmdhandler() via the gk20a_pmu_cmd_post()
 * callback parameter.  The poster initializes @success to 0 and then
 * polls it (via pmu_wait_message_cond) to learn whether the RPC
 * completed successfully.
 */
struct perfrpc_pmucmdhandler_params {
	/* In/out buffer of the RPC call; handler reads b_supported from it. */
	struct nv_pmu_perf_rpc *prpccall;
	/* Set to 1 by the completion handler when the RPC is supported. */
	u32 success;
};
25
26static void perfrpc_pmucmdhandler(struct gk20a *g, struct pmu_msg *msg,
27 void *param, u32 handle, u32 status)
28{
29 struct perfrpc_pmucmdhandler_params *phandlerparams =
30 (struct perfrpc_pmucmdhandler_params *)param;
31
32 gk20a_dbg_info("");
33
34 if (msg->msg.perf.msg_type != NV_PMU_PERF_MSG_ID_RPC) {
35 gk20a_err(dev_from_gk20a(g),
36 "unsupported msg for VFE LOAD RPC %x",
37 msg->msg.perf.msg_type);
38 return;
39 }
40
41 if (phandlerparams->prpccall->b_supported)
42 phandlerparams->success = 1;
43}
44
45static int pmu_handle_perf_event(struct gk20a *g, void *pmu_msg)
46{
47 struct nv_pmu_perf_msg *msg = (struct nv_pmu_perf_msg *)pmu_msg;
48
49 gk20a_dbg_fn("");
50 switch (msg->msg_type) {
51 case NV_PMU_PERF_MSG_ID_VFE_CALLBACK:
52 nvgpu_clk_arb_schedule_vf_table_update(g);
53 break;
54 default:
55 WARN_ON(1);
56 break;
57 }
58 return 0;
59}
60
61u32 perf_pmu_vfe_load(struct gk20a *g)
62{
63 struct pmu_cmd cmd;
64 struct pmu_msg msg;
65 struct pmu_payload payload = { {0} };
66 u32 status;
67 u32 seqdesc;
68 struct nv_pmu_perf_rpc rpccall = {0};
69 struct perfrpc_pmucmdhandler_params handler = {0};
70
71 /*register call back for future VFE updates*/
72 g->ops.perf.handle_pmu_perf_event = pmu_handle_perf_event;
73
74 rpccall.function = NV_PMU_PERF_RPC_ID_VFE_LOAD;
75 rpccall.params.vfe_load.b_load = true;
76 cmd.hdr.unit_id = PMU_UNIT_PERF;
77 cmd.hdr.size = (u32)sizeof(struct nv_pmu_perf_cmd) +
78 (u32)sizeof(struct pmu_hdr);
79
80 cmd.cmd.perf.cmd_type = NV_PMU_PERF_CMD_ID_RPC;
81 msg.hdr.size = sizeof(struct pmu_msg);
82
83 payload.in.buf = (u8 *)&rpccall;
84 payload.in.size = (u32)sizeof(struct nv_pmu_perf_rpc);
85 payload.in.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED;
86 payload.in.offset = NV_PMU_PERF_CMD_RPC_ALLOC_OFFSET;
87
88 payload.out.buf = (u8 *)&rpccall;
89 payload.out.size = (u32)sizeof(struct nv_pmu_perf_rpc);
90 payload.out.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED;
91 payload.out.offset = NV_PMU_PERF_MSG_RPC_ALLOC_OFFSET;
92
93 handler.prpccall = &rpccall;
94 handler.success = 0;
95
96 status = gk20a_pmu_cmd_post(g, &cmd, NULL, &payload,
97 PMU_COMMAND_QUEUE_LPQ,
98 perfrpc_pmucmdhandler, (void *)&handler,
99 &seqdesc, ~0);
100
101 if (status) {
102 gk20a_err(dev_from_gk20a(g),
103 "unable to post perf RPC cmd %x",
104 cmd.cmd.perf.cmd_type);
105 goto done;
106 }
107
108 pmu_wait_message_cond(&g->pmu,
109 gk20a_get_gr_idle_timeout(g),
110 &handler.success, 1);
111
112 if (handler.success == 0) {
113 status = -EINVAL;
114 gk20a_err(dev_from_gk20a(g), "rpc call to load VFE failed");
115 }
116done:
117 return status;
118}