author    Richard Zhao <rizhao@nvidia.com>    2018-01-30 02:24:37 -0500
committer mobile promotions <svcmobile_promotions@nvidia.com>    2018-02-27 17:30:52 -0500
commit    6393eddfa996fba03464f897b85aa5ec79860fed (patch)
tree      557ebe9be93e2b0464118e7d8ec019d9d5dbae5f /drivers/gpu/nvgpu/vgpu/vgpu.c
parent    7932568b7fe9e16b2b83bc58b2b3686c0d5e52d4 (diff)
gpu: nvgpu: vgpu: move common files out of linux folder
Most files have been moved out of the linux folder. More code can be made
common as HAL-ification goes on.

Jira EVLR-2364

Change-Id: Ia9dbdbc82f45ceefe5c788eac7517000cd455d5e
Signed-off-by: Richard Zhao <rizhao@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1649947
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/vgpu/vgpu.c')
-rw-r--r--  drivers/gpu/nvgpu/vgpu/vgpu.c | 350
1 file changed, 350 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/nvgpu/vgpu/vgpu.c b/drivers/gpu/nvgpu/vgpu/vgpu.c
new file mode 100644
index 00000000..eb56d4f9
--- /dev/null
+++ b/drivers/gpu/nvgpu/vgpu/vgpu.c
@@ -0,0 +1,350 @@
/*
 * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <nvgpu/enabled.h>
#include <nvgpu/bus.h>
#include <nvgpu/vgpu/vgpu_ivc.h>
#include <nvgpu/vgpu/vgpu.h>

#include "gk20a/gk20a.h"
#include "fecs_trace_vgpu.h"

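/*
 * Bring up / tear down the IVC queues used to talk to the vgpu server.
 * TEGRA_VGPU_QUEUE_SIZES is assumed to expand to one size per queue,
 * starting at queue id TEGRA_VGPU_QUEUE_CMD.
 */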
int vgpu_comm_init(struct gk20a *g)
{
        size_t queue_sizes[] = { TEGRA_VGPU_QUEUE_SIZES };

        return vgpu_ivc_init(g, 3, queue_sizes, TEGRA_VGPU_QUEUE_CMD,
                        ARRAY_SIZE(queue_sizes));
}

void vgpu_comm_deinit(void)
{
        size_t queue_sizes[] = { TEGRA_VGPU_QUEUE_SIZES };

        vgpu_ivc_deinit(TEGRA_VGPU_QUEUE_CMD, ARRAY_SIZE(queue_sizes));
}

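/*
 * Round-trip a command message to the server: send *msg, then copy the
 * reply back into *msg and release the IVC buffer. Callers typically
 * follow this pattern (TEGRA_VGPU_CMD_FOO is a hypothetical command id;
 * real ones live in the tegra_vgpu headers):
 *
 *        struct tegra_vgpu_cmd_msg msg = {0};
 *        int err;
 *
 *        msg.cmd = TEGRA_VGPU_CMD_FOO;
 *        msg.handle = vgpu_get_handle(g);
 *        err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 *        err = err ? err : msg.ret;
 */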
int vgpu_comm_sendrecv(struct tegra_vgpu_cmd_msg *msg, size_t size_in,
                size_t size_out)
{
        void *handle;
        size_t size = size_in;
        void *data = msg;
        int err;

        err = vgpu_ivc_sendrecv(vgpu_ivc_get_server_vmid(),
                        TEGRA_VGPU_QUEUE_CMD, &handle, &data, &size);
        if (!err) {
                WARN_ON(size < size_out);
                memcpy(msg, data, size_out);
                vgpu_ivc_release(handle);
        }

        return err;
}

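/*
 * Open the connection to the server's GPU module. Returns the
 * server-assigned handle, or 0 on failure.
 */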
u64 vgpu_connect(void)
{
        struct tegra_vgpu_cmd_msg msg;
        struct tegra_vgpu_connect_params *p = &msg.params.connect;
        int err;

        msg.cmd = TEGRA_VGPU_CMD_CONNECT;
        p->module = TEGRA_VGPU_MODULE_GPU;
        err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));

        return (err || msg.ret) ? 0 : p->handle;
}

int vgpu_get_attribute(u64 handle, u32 attrib, u32 *value)
{
        struct tegra_vgpu_cmd_msg msg;
        struct tegra_vgpu_attrib_params *p = &msg.params.attrib;
        int err;

        msg.cmd = TEGRA_VGPU_CMD_GET_ATTRIBUTE;
        msg.handle = handle;
        p->attrib = attrib;
        err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));

        if (err || msg.ret)
                return -1;

        *value = p->value;
        return 0;
}

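/*
 * Channel events are only expected for TSG-bound channels; anything
 * else, or an out-of-range id/event_id, is logged and dropped.
 */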
static void vgpu_handle_channel_event(struct gk20a *g,
                struct tegra_vgpu_channel_event_info *info)
{
        struct tsg_gk20a *tsg;

        if (!info->is_tsg) {
                nvgpu_err(g, "channel event posted");
                return;
        }

        if (info->id >= g->fifo.num_channels ||
                info->event_id >= TEGRA_VGPU_CHANNEL_EVENT_ID_MAX) {
                nvgpu_err(g, "invalid channel event");
                return;
        }

        tsg = &g->fifo.tsg[info->id];

        gk20a_tsg_event_id_post_event(tsg, info->event_id);
}

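/*
 * Interrupt thread. A vgpu client has no real interrupt line; the
 * server forwards stall/nonstall interrupts and other events over the
 * TEGRA_VGPU_QUEUE_INTR queue, and this loop dispatches them until an
 * ABORT event arrives.
 */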
int vgpu_intr_thread(void *dev_id)
{
        struct gk20a *g = dev_id;
        struct vgpu_priv_data *priv = vgpu_get_priv_data(g);

        while (true) {
                struct tegra_vgpu_intr_msg *msg;
                u32 sender;
                void *handle;
                size_t size;
                int err;

                err = vgpu_ivc_recv(TEGRA_VGPU_QUEUE_INTR, &handle,
                                (void **)&msg, &size, &sender);
                if (err == -ETIME)
                        continue;
                if (WARN_ON(err))
                        continue;

                if (msg->event == TEGRA_VGPU_EVENT_ABORT) {
                        vgpu_ivc_release(handle);
                        break;
                }

                switch (msg->event) {
                case TEGRA_VGPU_EVENT_INTR:
                        if (msg->unit == TEGRA_VGPU_INTR_GR)
                                vgpu_gr_isr(g, &msg->info.gr_intr);
                        else if (msg->unit == TEGRA_VGPU_NONSTALL_INTR_GR)
                                vgpu_gr_nonstall_isr(g,
                                                &msg->info.gr_nonstall_intr);
                        else if (msg->unit == TEGRA_VGPU_INTR_FIFO)
                                vgpu_fifo_isr(g, &msg->info.fifo_intr);
                        else if (msg->unit == TEGRA_VGPU_NONSTALL_INTR_FIFO)
                                vgpu_fifo_nonstall_isr(g,
                                                &msg->info.fifo_nonstall_intr);
                        else if (msg->unit == TEGRA_VGPU_NONSTALL_INTR_CE2)
                                vgpu_ce2_nonstall_isr(g,
                                                &msg->info.ce2_nonstall_intr);
                        break;
#ifdef CONFIG_GK20A_CTXSW_TRACE
                case TEGRA_VGPU_EVENT_FECS_TRACE:
                        vgpu_fecs_trace_data_update(g);
                        break;
#endif
                case TEGRA_VGPU_EVENT_CHANNEL:
                        vgpu_handle_channel_event(g, &msg->info.channel_event);
                        break;
                case TEGRA_VGPU_EVENT_SM_ESR:
                        vgpu_gr_handle_sm_esr_event(g, &msg->info.sm_esr);
                        break;
                default:
                        nvgpu_err(g, "unknown event %u", msg->event);
                        break;
                }

                vgpu_ivc_release(handle);
        }

        while (!nvgpu_thread_should_stop(&priv->intr_handler))
                nvgpu_msleep(10);
        return 0;
}

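/*
 * Common teardown: release per-unit support, then send an ABORT event
 * to ourselves so vgpu_intr_thread() exits its loop before the thread
 * is stopped.
 */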
void vgpu_remove_support_common(struct gk20a *g)
{
        struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
        struct tegra_vgpu_intr_msg msg;
        int err;

        if (g->dbg_regops_tmp_buf)
                nvgpu_kfree(g, g->dbg_regops_tmp_buf);

        if (g->pmu.remove_support)
                g->pmu.remove_support(&g->pmu);

        if (g->gr.remove_support)
                g->gr.remove_support(&g->gr);

        if (g->fifo.remove_support)
                g->fifo.remove_support(&g->fifo);

        if (g->mm.remove_support)
                g->mm.remove_support(&g->mm);

        msg.event = TEGRA_VGPU_EVENT_ABORT;
        err = vgpu_ivc_send(vgpu_ivc_get_peer_self(), TEGRA_VGPU_QUEUE_INTR,
                        &msg, sizeof(msg));
        WARN_ON(err);
        nvgpu_thread_stop(&priv->intr_handler);
}

void vgpu_detect_chip(struct gk20a *g)
{
        struct nvgpu_gpu_params *p = &g->params;
        struct vgpu_priv_data *priv = vgpu_get_priv_data(g);

        p->gpu_arch = priv->constants.arch;
        p->gpu_impl = priv->constants.impl;
        p->gpu_rev = priv->constants.rev;

        gk20a_dbg_info("arch: %x, impl: %x, rev: %x\n",
                        p->gpu_arch,
                        p->gpu_impl,
                        p->gpu_rev);
}

int vgpu_init_gpu_characteristics(struct gk20a *g)
{
        int err;

        gk20a_dbg_fn("");

        err = gk20a_init_gpu_characteristics(g);
        if (err)
                return err;

        /* features vgpu does not support */
        __nvgpu_set_enabled(g, NVGPU_SUPPORT_MAP_BUFFER_BATCH, false);
        __nvgpu_set_enabled(g, NVGPU_SUPPORT_RESCHEDULE_RUNLIST, false);

        return 0;
}

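/* Read the GPU PTIMER value from the server. */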
int vgpu_read_ptimer(struct gk20a *g, u64 *value)
{
        struct tegra_vgpu_cmd_msg msg = {0};
        struct tegra_vgpu_read_ptimer_params *p = &msg.params.read_ptimer;
        int err;

        gk20a_dbg_fn("");

        msg.cmd = TEGRA_VGPU_CMD_READ_PTIMER;
        msg.handle = vgpu_get_handle(g);

        err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
        err = err ? err : msg.ret;
        if (!err)
                *value = p->time;
        else
                nvgpu_err(g, "vgpu read ptimer failed, err=%d", err);

        return err;
}

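/*
 * Fetch correlated CPU/GPU timestamp pairs from the server. Note that
 * the source_id argument is currently ignored: the TSC source is always
 * requested.
 */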
int vgpu_get_timestamps_zipper(struct gk20a *g,
                u32 source_id, u32 count,
                struct nvgpu_cpu_time_correlation_sample *samples)
{
        struct tegra_vgpu_cmd_msg msg = {0};
        struct tegra_vgpu_get_timestamps_zipper_params *p =
                        &msg.params.get_timestamps_zipper;
        int err;
        u32 i;

        gk20a_dbg_fn("");

        if (count > TEGRA_VGPU_GET_TIMESTAMPS_ZIPPER_MAX_COUNT) {
                nvgpu_err(g, "count %u overflow", count);
                return -EINVAL;
        }

        msg.cmd = TEGRA_VGPU_CMD_GET_TIMESTAMPS_ZIPPER;
        msg.handle = vgpu_get_handle(g);
        p->source_id = TEGRA_VGPU_GET_TIMESTAMPS_ZIPPER_SRC_ID_TSC;
        p->count = count;

        err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
        err = err ? err : msg.ret;
        if (err) {
                nvgpu_err(g, "vgpu get timestamps zipper failed, err=%d", err);
                return err;
        }

        for (i = 0; i < count; i++) {
                samples[i].cpu_timestamp = p->samples[i].cpu_timestamp;
                samples[i].gpu_timestamp = p->samples[i].gpu_timestamp;
        }

        return err;
}

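/*
 * Install the per-chip HAL for the detected arch/impl id. Only GP10B
 * and GV11B are supported as vgpu targets here.
 */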
int vgpu_init_hal(struct gk20a *g)
{
        u32 ver = g->params.gpu_arch + g->params.gpu_impl;
        int err;

        switch (ver) {
        case NVGPU_GPUID_GP10B:
                gk20a_dbg_info("gp10b detected");
                err = vgpu_gp10b_init_hal(g);
                break;
        case NVGPU_GPUID_GV11B:
                err = vgpu_gv11b_init_hal(g);
                break;
        default:
                nvgpu_err(g, "no support for %x", ver);
                err = -ENODEV;
                break;
        }

        return err;
}

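/*
 * Cache the server-provided constants (arch/impl/rev, unit counts, ...)
 * in priv->constants, rejecting GPC/TPC counts beyond the tegra_vgpu
 * limits first.
 */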
int vgpu_get_constants(struct gk20a *g)
{
        struct tegra_vgpu_cmd_msg msg = {};
        struct tegra_vgpu_constants_params *p = &msg.params.constants;
        struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
        int err;

        gk20a_dbg_fn("");

        msg.cmd = TEGRA_VGPU_CMD_GET_CONSTANTS;
        msg.handle = vgpu_get_handle(g);
        err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
        err = err ? err : msg.ret;

        if (unlikely(err)) {
                nvgpu_err(g, "%s failed, err=%d", __func__, err);
                return err;
        }

        if (unlikely(p->gpc_count > TEGRA_VGPU_MAX_GPC_COUNT ||
                p->max_tpc_per_gpc_count > TEGRA_VGPU_MAX_TPC_COUNT_PER_GPC)) {
                nvgpu_err(g, "gpc_count %d max_tpc_per_gpc %d overflow",
                                (int)p->gpc_count,
                                (int)p->max_tpc_per_gpc_count);
                return -EINVAL;
        }

        priv->constants = *p;
        return 0;
}