path: root/drivers/gpu/nvgpu/vgpu/css_vgpu.c
author	Antony Clince Alex <aalex@nvidia.com>	2018-05-29 05:59:06 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2018-06-15 00:41:31 -0400
commit	9751fb0b5405bb283f5bd884115465443f5f8608 (patch)
tree	80df0a6e599e33d9b6e3a7e62dd0792e64b30862 /drivers/gpu/nvgpu/vgpu/css_vgpu.c
parent	d27d9ff7a89ab1a590a9cc8367af7f3a3ea698a8 (diff)
gpu: nvgpu: vgpu: Unified CSS VGPU HAL
- defined platform-agnostic wrapper for mempool mapping and unmapping.
- used platform-agnostic wrapper for device tree parsing.
- modified css_gr_gk20a to include special handling in case of rm-server.

JIRA: VQRM:3699

Change-Id: I08fd26052edfa1edf45a67be57f7d27c38ad106a
Signed-off-by: Antony Clince Alex <aalex@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1733576
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/vgpu/css_vgpu.c')
-rw-r--r--	drivers/gpu/nvgpu/vgpu/css_vgpu.c | 237
1 file changed, 237 insertions(+), 0 deletions(-)
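For context, the entry points added below are driven by the css_gr_gk20a layer. A minimal caller sketch (illustrative only, not part of this change; the function name and call order are assumptions, error handling trimmed):

        /*
         * Hypothetical caller: enable the shared snapshot buffer for a
         * client, flush once, then tear everything down. Uses only the
         * public entry points defined in this file.
         */
        static int css_vgpu_example(struct channel_gk20a *ch,
                        struct gk20a_cs_snapshot_client *cs_client)
        {
                struct gk20a *g = ch->g;
                u32 pending = 0;
                bool hw_overflow = false;
                int err;

                /* Attach the client and map the shared mempool. */
                err = vgpu_css_enable_snapshot_buffer(ch, cs_client);
                if (err)
                        return err;

                /* Ask the RM server to flush completed snapshot entries. */
                err = vgpu_css_flush_snapshots(ch, &pending, &hw_overflow);
                if (!err)
                        nvgpu_log_info(g, "pending=%u hw_overflow=%d",
                                        pending, hw_overflow);

                /* Detach the client and release the shared buffer. */
                err = vgpu_css_detach(ch, cs_client);
                vgpu_css_release_snapshot_buffer(&g->gr);
                return err;
        }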
diff --git a/drivers/gpu/nvgpu/vgpu/css_vgpu.c b/drivers/gpu/nvgpu/vgpu/css_vgpu.c
new file mode 100644
index 00000000..0e44b81a
--- /dev/null
+++ b/drivers/gpu/nvgpu/vgpu/css_vgpu.c
@@ -0,0 +1,237 @@
/*
 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#if defined(CONFIG_GK20A_CYCLE_STATS)

#include <nvgpu/vgpu/vgpu_ivm.h>
#include <nvgpu/vgpu/vgpu.h>
#include <nvgpu/vgpu/tegra_vgpu.h>
#include <nvgpu/dt.h>

#include "gk20a/gk20a.h"
#include "gk20a/channel_gk20a.h"
#include "gk20a/css_gr_gk20a.h"

#include "vgpu/css_vgpu.h"

/* IVM cookie for the snapshot mempool shared with the RM server */
static struct tegra_hv_ivm_cookie *css_cookie;

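/*
 * Read the mempool id from the "mempool-css" device tree property
 * (index 1) and reserve the matching IVM mempool. Returns the IVM
 * cookie on success or an ERR_PTR on failure.
 */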
static struct tegra_hv_ivm_cookie *vgpu_css_reserve_mempool(struct gk20a *g)
{
        struct tegra_hv_ivm_cookie *cookie;
        u32 mempool;
        int err;

        err = nvgpu_dt_read_u32_index(g, "mempool-css", 1, &mempool);
        if (err) {
                nvgpu_err(g, "dt missing mempool-css");
                return ERR_PTR(err);
        }

        cookie = vgpu_ivm_mempool_reserve(mempool);
        if (IS_ERR_OR_NULL(cookie)) {
                nvgpu_err(g, "mempool %u reserve failed", mempool);
                return ERR_PTR(-EINVAL);
        }
        return cookie;
}

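/*
 * Report the size of the shared snapshot buffer. Reuses the cached
 * cookie when the mempool is already reserved; otherwise reserves the
 * mempool just long enough to query its size. Returns 0 on failure.
 */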
u32 vgpu_css_get_buffer_size(struct gk20a *g)
{
        struct tegra_hv_ivm_cookie *cookie;
        u32 size;

        nvgpu_log_fn(g, " ");

        if (css_cookie) {
                size = (u32)vgpu_ivm_get_size(css_cookie);
                nvgpu_log_info(g, "buffer size = 0x%08x", size);
                return size;
        }

        cookie = vgpu_css_reserve_mempool(g);
        if (IS_ERR(cookie))
                return 0;

        size = vgpu_ivm_get_size(cookie);

        vgpu_ivm_mempool_unreserve(cookie);
        nvgpu_log_info(g, "buffer size = 0x%08x", size);
        return size;
}

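/*
 * Reserve and map the snapshot mempool, then initialize the snapshot
 * FIFO bounds (hw_snapshot/hw_end/hw_get) over the mapped buffer.
 */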
static int vgpu_css_init_snapshot_buffer(struct gr_gk20a *gr)
{
        struct gk20a *g = gr->g;
        struct gk20a_cs_snapshot *data = gr->cs_data;
        void *buf = NULL;
        int err;
        u64 size;

        nvgpu_log_fn(g, " ");

        if (data->hw_snapshot)
                return 0;

        css_cookie = vgpu_css_reserve_mempool(g);
        if (IS_ERR(css_cookie))
                return PTR_ERR(css_cookie);

        size = vgpu_ivm_get_size(css_cookie);
        /* Make sure buffer size is large enough */
        if (size < CSS_MIN_HW_SNAPSHOT_SIZE) {
                nvgpu_info(g, "mempool size 0x%llx too small", size);
                err = -ENOMEM;
                goto fail;
        }

        buf = vgpu_ivm_mempool_map(css_cookie);
        if (!buf) {
                nvgpu_info(g, "vgpu_ivm_mempool_map failed");
                err = -EINVAL;
                goto fail;
        }

        data->hw_snapshot = buf;
        data->hw_end = data->hw_snapshot +
                size / sizeof(struct gk20a_cs_snapshot_fifo_entry);
        data->hw_get = data->hw_snapshot;
        memset(data->hw_snapshot, 0xff, size);
        return 0;
fail:
        vgpu_ivm_mempool_unreserve(css_cookie);
        css_cookie = NULL;
        return err;
}

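/* Unmap the snapshot buffer and release the mempool reservation. */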
void vgpu_css_release_snapshot_buffer(struct gr_gk20a *gr)
{
        struct gk20a_cs_snapshot *data = gr->cs_data;
        struct gk20a *g = gr->g;

        if (!data->hw_snapshot)
                return;

        vgpu_ivm_mempool_unmap(css_cookie, data->hw_snapshot);
        data->hw_snapshot = NULL;

        vgpu_ivm_mempool_unreserve(css_cookie);
        css_cookie = NULL;

        nvgpu_log_info(g, "cyclestats(vgpu): buffer for snapshots released\n");
}

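/*
 * Request a flush of completed snapshot entries from the RM server.
 * buf_info carries the current read offset to the server; the server's
 * reply is reported back through *pending, and *hw_overflow reflects
 * the server-side overflow flag.
 */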
int vgpu_css_flush_snapshots(struct channel_gk20a *ch,
                        u32 *pending, bool *hw_overflow)
{
        struct gk20a *g = ch->g;
        struct tegra_vgpu_cmd_msg msg = {};
        struct tegra_vgpu_channel_cyclestats_snapshot_params *p;
        struct gr_gk20a *gr = &g->gr;
        struct gk20a_cs_snapshot *data = gr->cs_data;
        int err;

        nvgpu_log_fn(g, " ");

        msg.cmd = TEGRA_VGPU_CMD_CHANNEL_CYCLESTATS_SNAPSHOT;
        msg.handle = vgpu_get_handle(g);
        p = &msg.params.cyclestats_snapshot;
        p->handle = ch->virt_ctx;
        p->subcmd = TEGRA_VGPU_CYCLE_STATS_SNAPSHOT_CMD_FLUSH;
        p->buf_info = (uintptr_t)data->hw_get - (uintptr_t)data->hw_snapshot;

        err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));

        err = (err || msg.ret) ? -1 : 0;

        *pending = p->buf_info;
        *hw_overflow = p->hw_overflow;

        return err;
}

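/*
 * Attach a cyclestats snapshot client to the channel on the RM server.
 * On success, records the perfmon_start assigned by the server.
 */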
static int vgpu_css_attach(struct channel_gk20a *ch,
                struct gk20a_cs_snapshot_client *cs_client)
{
        struct gk20a *g = ch->g;
        struct tegra_vgpu_cmd_msg msg = {};
        struct tegra_vgpu_channel_cyclestats_snapshot_params *p =
                        &msg.params.cyclestats_snapshot;
        int err;

        nvgpu_log_fn(g, " ");

        msg.cmd = TEGRA_VGPU_CMD_CHANNEL_CYCLESTATS_SNAPSHOT;
        msg.handle = vgpu_get_handle(g);
        p->handle = ch->virt_ctx;
        p->subcmd = TEGRA_VGPU_CYCLE_STATS_SNAPSHOT_CMD_ATTACH;
        p->perfmon_count = cs_client->perfmon_count;

        err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
        err = err ? err : msg.ret;
        if (err)
                nvgpu_err(g, "failed");
        else
                cs_client->perfmon_start = p->perfmon_start;

        return err;
}

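/*
 * Detach a cyclestats snapshot client from the channel, handing its
 * perfmon range back to the RM server.
 */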
int vgpu_css_detach(struct channel_gk20a *ch,
                struct gk20a_cs_snapshot_client *cs_client)
{
        struct gk20a *g = ch->g;
        struct tegra_vgpu_cmd_msg msg = {};
        struct tegra_vgpu_channel_cyclestats_snapshot_params *p =
                        &msg.params.cyclestats_snapshot;
        int err;

        nvgpu_log_fn(g, " ");

        msg.cmd = TEGRA_VGPU_CMD_CHANNEL_CYCLESTATS_SNAPSHOT;
        msg.handle = vgpu_get_handle(g);
        p->handle = ch->virt_ctx;
        p->subcmd = TEGRA_VGPU_CYCLE_STATS_SNAPSHOT_CMD_DETACH;
        p->perfmon_start = cs_client->perfmon_start;
        p->perfmon_count = cs_client->perfmon_count;

        err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
        err = err ? err : msg.ret;
        if (err)
                nvgpu_err(g, "failed");

        return err;
}

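/* Attach the client, then set up the shared snapshot buffer. */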
int vgpu_css_enable_snapshot_buffer(struct channel_gk20a *ch,
                                struct gk20a_cs_snapshot_client *cs_client)
{
        int ret;

        ret = vgpu_css_attach(ch, cs_client);
        if (ret)
                return ret;

        ret = vgpu_css_init_snapshot_buffer(&ch->g->gr);
        return ret;
}

#endif /* CONFIG_GK20A_CYCLE_STATS */