diff options
Diffstat (limited to 'include/pmu_perf/pmu_perf.c')
-rw-r--r-- | include/pmu_perf/pmu_perf.c | 128 |
1 file changed, 0 insertions, 128 deletions
diff --git a/include/pmu_perf/pmu_perf.c b/include/pmu_perf/pmu_perf.c deleted file mode 100644 index a3b94ce..0000000 --- a/include/pmu_perf/pmu_perf.c +++ /dev/null | |||
@@ -1,128 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
20 | * DEALINGS IN THE SOFTWARE. | ||
21 | */ | ||
22 | |||
23 | #include <nvgpu/bug.h> | ||
24 | #include <nvgpu/pmu.h> | ||
25 | #include <nvgpu/clk_arb.h> | ||
26 | #include <nvgpu/gk20a.h> | ||
27 | |||
28 | #include "pmu_perf.h" | ||
29 | |||
/*
 * Context passed to the PERF RPC command-completion callback.
 * One instance lives on the caller's stack for the duration of a single
 * nvgpu_pmu_cmd_post()/pmu_wait_message_cond() round trip.
 */
struct perfrpc_pmucmdhandler_params {
	/* RPC request/reply buffer shared with the PMU (in/out payload). */
	struct nv_pmu_perf_rpc *prpccall;
	/* Set to 1 by the callback on success; polled by the submitter. */
	u32 success;
};
34 | |||
35 | static void perfrpc_pmucmdhandler(struct gk20a *g, struct pmu_msg *msg, | ||
36 | void *param, u32 handle, u32 status) | ||
37 | { | ||
38 | struct perfrpc_pmucmdhandler_params *phandlerparams = | ||
39 | (struct perfrpc_pmucmdhandler_params *)param; | ||
40 | |||
41 | nvgpu_log_info(g, " "); | ||
42 | |||
43 | if (msg->msg.perf.msg_type != NV_PMU_PERF_MSG_ID_RPC) { | ||
44 | nvgpu_err(g, "unsupported msg for VFE LOAD RPC %x", | ||
45 | msg->msg.perf.msg_type); | ||
46 | return; | ||
47 | } | ||
48 | |||
49 | if (phandlerparams->prpccall->b_supported) { | ||
50 | phandlerparams->success = 1; | ||
51 | } | ||
52 | } | ||
53 | |||
54 | static int pmu_handle_perf_event(struct gk20a *g, void *pmu_msg) | ||
55 | { | ||
56 | struct nv_pmu_perf_msg *msg = (struct nv_pmu_perf_msg *)pmu_msg; | ||
57 | |||
58 | nvgpu_log_fn(g, " "); | ||
59 | switch (msg->msg_type) { | ||
60 | case NV_PMU_PERF_MSG_ID_VFE_CALLBACK: | ||
61 | nvgpu_clk_arb_schedule_vf_table_update(g); | ||
62 | break; | ||
63 | default: | ||
64 | WARN_ON(1); | ||
65 | break; | ||
66 | } | ||
67 | return 0; | ||
68 | } | ||
69 | |||
70 | u32 perf_pmu_vfe_load(struct gk20a *g) | ||
71 | { | ||
72 | struct pmu_cmd cmd; | ||
73 | struct pmu_payload payload; | ||
74 | u32 status; | ||
75 | u32 seqdesc; | ||
76 | struct nv_pmu_perf_rpc rpccall; | ||
77 | struct perfrpc_pmucmdhandler_params handler; | ||
78 | |||
79 | memset(&payload, 0, sizeof(struct pmu_payload)); | ||
80 | memset(&rpccall, 0, sizeof(struct nv_pmu_perf_rpc)); | ||
81 | memset(&handler, 0, sizeof(struct perfrpc_pmucmdhandler_params)); | ||
82 | |||
83 | /*register call back for future VFE updates*/ | ||
84 | g->ops.pmu_perf.handle_pmu_perf_event = pmu_handle_perf_event; | ||
85 | |||
86 | rpccall.function = NV_PMU_PERF_RPC_ID_VFE_LOAD; | ||
87 | rpccall.params.vfe_load.b_load = true; | ||
88 | cmd.hdr.unit_id = PMU_UNIT_PERF; | ||
89 | cmd.hdr.size = (u32)sizeof(struct nv_pmu_perf_cmd) + | ||
90 | (u32)sizeof(struct pmu_hdr); | ||
91 | |||
92 | cmd.cmd.perf.cmd_type = NV_PMU_PERF_CMD_ID_RPC; | ||
93 | |||
94 | payload.in.buf = (u8 *)&rpccall; | ||
95 | payload.in.size = (u32)sizeof(struct nv_pmu_perf_rpc); | ||
96 | payload.in.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED; | ||
97 | payload.in.offset = NV_PMU_PERF_CMD_RPC_ALLOC_OFFSET; | ||
98 | |||
99 | payload.out.buf = (u8 *)&rpccall; | ||
100 | payload.out.size = (u32)sizeof(struct nv_pmu_perf_rpc); | ||
101 | payload.out.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED; | ||
102 | payload.out.offset = NV_PMU_PERF_MSG_RPC_ALLOC_OFFSET; | ||
103 | |||
104 | handler.prpccall = &rpccall; | ||
105 | handler.success = 0; | ||
106 | |||
107 | status = nvgpu_pmu_cmd_post(g, &cmd, NULL, &payload, | ||
108 | PMU_COMMAND_QUEUE_LPQ, | ||
109 | perfrpc_pmucmdhandler, (void *)&handler, | ||
110 | &seqdesc, ~0); | ||
111 | |||
112 | if (status) { | ||
113 | nvgpu_err(g, "unable to post perf RPC cmd %x", | ||
114 | cmd.cmd.perf.cmd_type); | ||
115 | goto done; | ||
116 | } | ||
117 | |||
118 | pmu_wait_message_cond(&g->pmu, | ||
119 | gk20a_get_gr_idle_timeout(g), | ||
120 | &handler.success, 1); | ||
121 | |||
122 | if (handler.success == 0) { | ||
123 | status = -EINVAL; | ||
124 | nvgpu_err(g, "rpc call to load VFE failed"); | ||
125 | } | ||
126 | done: | ||
127 | return status; | ||
128 | } | ||