Diffstat (limited to 'drivers/gpu/nvgpu/common/linux/vgpu/gr_vgpu.c')
-rw-r--r-- | drivers/gpu/nvgpu/common/linux/vgpu/gr_vgpu.c | 1277
1 file changed, 0 insertions, 1277 deletions
diff --git a/drivers/gpu/nvgpu/common/linux/vgpu/gr_vgpu.c b/drivers/gpu/nvgpu/common/linux/vgpu/gr_vgpu.c
deleted file mode 100644
index f455763b..00000000
--- a/drivers/gpu/nvgpu/common/linux/vgpu/gr_vgpu.c
+++ /dev/null
@@ -1,1277 +0,0 @@
1 | /* | ||
2 | * Virtualized GPU Graphics | ||
3 | * | ||
4 | * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms and conditions of the GNU General Public License, | ||
8 | * version 2, as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | * more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
17 | */ | ||
18 | |||
19 | #include <nvgpu/kmem.h> | ||
20 | #include <nvgpu/bug.h> | ||
21 | #include <nvgpu/dma.h> | ||
22 | #include <nvgpu/error_notifier.h> | ||
24 | #include <nvgpu/vgpu/vgpu_ivc.h> | ||
25 | |||
26 | #include "vgpu.h" | ||
27 | #include "gr_vgpu.h" | ||
28 | #include "gk20a/gk20a.h" | ||
29 | #include "gk20a/dbg_gpu_gk20a.h" | ||
30 | #include "gk20a/channel_gk20a.h" | ||
31 | #include "gk20a/tsg_gk20a.h" | ||
32 | |||
33 | #include <nvgpu/hw/gk20a/hw_gr_gk20a.h> | ||
34 | #include <nvgpu/hw/gk20a/hw_ctxsw_prog_gk20a.h> | ||
35 | |||
36 | void vgpu_gr_detect_sm_arch(struct gk20a *g) | ||
37 | { | ||
38 | struct vgpu_priv_data *priv = vgpu_get_priv_data(g); | ||
39 | |||
40 | gk20a_dbg_fn(""); | ||
41 | |||
42 | g->params.sm_arch_sm_version = | ||
43 | priv->constants.sm_arch_sm_version; | ||
44 | g->params.sm_arch_spa_version = | ||
45 | priv->constants.sm_arch_spa_version; | ||
46 | g->params.sm_arch_warp_count = | ||
47 | priv->constants.sm_arch_warp_count; | ||
48 | } | ||
49 | |||
50 | int vgpu_gr_commit_inst(struct channel_gk20a *c, u64 gpu_va) | ||
51 | { | ||
52 | struct tegra_vgpu_cmd_msg msg; | ||
53 | struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx; | ||
54 | int err; | ||
55 | |||
56 | gk20a_dbg_fn(""); | ||
57 | |||
58 | msg.cmd = TEGRA_VGPU_CMD_CHANNEL_COMMIT_GR_CTX; | ||
59 | msg.handle = vgpu_get_handle(c->g); | ||
60 | p->handle = c->virt_ctx; | ||
61 | err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)); | ||
62 | |||
63 | return (err || msg.ret) ? -1 : 0; | ||
64 | } | ||
65 | |||
66 | static int vgpu_gr_commit_global_ctx_buffers(struct gk20a *g, | ||
67 | struct channel_gk20a *c, bool patch) | ||
68 | { | ||
69 | struct tegra_vgpu_cmd_msg msg; | ||
70 | struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx; | ||
71 | int err; | ||
72 | |||
73 | gk20a_dbg_fn(""); | ||
74 | |||
75 | msg.cmd = TEGRA_VGPU_CMD_CHANNEL_COMMIT_GR_GLOBAL_CTX; | ||
76 | msg.handle = vgpu_get_handle(g); | ||
77 | p->handle = c->virt_ctx; | ||
78 | err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)); | ||
79 | |||
80 | return (err || msg.ret) ? -1 : 0; | ||
81 | } | ||
82 | |||
83 | /* load a saved fresh copy of the golden image into the channel gr_ctx */ | ||
84 | static int vgpu_gr_load_golden_ctx_image(struct gk20a *g, | ||
85 | struct channel_gk20a *c) | ||
86 | { | ||
87 | struct tegra_vgpu_cmd_msg msg; | ||
88 | struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx; | ||
89 | int err; | ||
90 | |||
91 | gk20a_dbg_fn(""); | ||
92 | |||
93 | msg.cmd = TEGRA_VGPU_CMD_CHANNEL_LOAD_GR_GOLDEN_CTX; | ||
94 | msg.handle = vgpu_get_handle(g); | ||
95 | p->handle = c->virt_ctx; | ||
96 | err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)); | ||
97 | |||
98 | return (err || msg.ret) ? -1 : 0; | ||
99 | } | ||
100 | |||
101 | int vgpu_gr_init_ctx_state(struct gk20a *g) | ||
102 | { | ||
103 | struct gr_gk20a *gr = &g->gr; | ||
104 | struct vgpu_priv_data *priv = vgpu_get_priv_data(g); | ||
105 | |||
106 | gk20a_dbg_fn(""); | ||
107 | |||
108 | g->gr.ctx_vars.golden_image_size = priv->constants.golden_ctx_size; | ||
109 | g->gr.ctx_vars.zcull_ctxsw_image_size = priv->constants.zcull_ctx_size; | ||
110 | g->gr.ctx_vars.pm_ctxsw_image_size = priv->constants.hwpm_ctx_size; | ||
111 | if (!g->gr.ctx_vars.golden_image_size || | ||
112 | !g->gr.ctx_vars.zcull_ctxsw_image_size || | ||
113 | !g->gr.ctx_vars.pm_ctxsw_image_size) | ||
114 | return -ENXIO; | ||
115 | |||
116 | gr->ctx_vars.buffer_size = g->gr.ctx_vars.golden_image_size; | ||
117 | g->gr.ctx_vars.priv_access_map_size = 512 * 1024; | ||
118 | return 0; | ||
119 | } | ||
120 | |||
121 | static int vgpu_gr_alloc_global_ctx_buffers(struct gk20a *g) | ||
122 | { | ||
123 | struct gr_gk20a *gr = &g->gr; | ||
124 | int attr_buffer_size; | ||
125 | |||
126 | u32 cb_buffer_size = gr->bundle_cb_default_size * | ||
127 | gr_scc_bundle_cb_size_div_256b_byte_granularity_v(); | ||
128 | |||
129 | u32 pagepool_buffer_size = g->ops.gr.pagepool_default_size(g) * | ||
130 | gr_scc_pagepool_total_pages_byte_granularity_v(); | ||
131 | |||
132 | gk20a_dbg_fn(""); | ||
133 | |||
134 | attr_buffer_size = g->ops.gr.calc_global_ctx_buffer_size(g); | ||
135 | |||
136 | gk20a_dbg_info("cb_buffer_size : %d", cb_buffer_size); | ||
137 | gr->global_ctx_buffer[CIRCULAR].mem.size = cb_buffer_size; | ||
138 | |||
139 | gk20a_dbg_info("pagepool_buffer_size : %d", pagepool_buffer_size); | ||
140 | gr->global_ctx_buffer[PAGEPOOL].mem.size = pagepool_buffer_size; | ||
141 | |||
142 | gk20a_dbg_info("attr_buffer_size : %d", attr_buffer_size); | ||
143 | gr->global_ctx_buffer[ATTRIBUTE].mem.size = attr_buffer_size; | ||
144 | |||
145 | gk20a_dbg_info("priv access map size : %d", | ||
146 | gr->ctx_vars.priv_access_map_size); | ||
147 | gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem.size = | ||
148 | gr->ctx_vars.priv_access_map_size; | ||
149 | |||
150 | return 0; | ||
151 | } | ||
152 | |||
153 | static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g, | ||
154 | struct channel_gk20a *c) | ||
155 | { | ||
156 | struct tegra_vgpu_cmd_msg msg; | ||
157 | struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx; | ||
158 | struct vm_gk20a *ch_vm = c->vm; | ||
159 | struct tsg_gk20a *tsg; | ||
160 | u64 *g_bfr_va; | ||
161 | u64 *g_bfr_size; | ||
162 | struct gr_gk20a *gr = &g->gr; | ||
163 | u64 gpu_va; | ||
164 | u32 i; | ||
165 | int err; | ||
166 | |||
167 | gk20a_dbg_fn(""); | ||
168 | |||
169 | tsg = tsg_gk20a_from_ch(c); | ||
170 | if (!tsg) | ||
171 | return -EINVAL; | ||
172 | |||
173 | g_bfr_va = tsg->gr_ctx.global_ctx_buffer_va; | ||
174 | g_bfr_size = tsg->gr_ctx.global_ctx_buffer_size; | ||
175 | |||
176 | /* Circular Buffer */ | ||
177 | gpu_va = __nvgpu_vm_alloc_va(ch_vm, | ||
178 | gr->global_ctx_buffer[CIRCULAR].mem.size, | ||
179 | gmmu_page_size_kernel); | ||
180 | |||
181 | if (!gpu_va) | ||
182 | goto clean_up; | ||
183 | g_bfr_va[CIRCULAR_VA] = gpu_va; | ||
184 | g_bfr_size[CIRCULAR_VA] = gr->global_ctx_buffer[CIRCULAR].mem.size; | ||
185 | |||
186 | /* Attribute Buffer */ | ||
187 | gpu_va = __nvgpu_vm_alloc_va(ch_vm, | ||
188 | gr->global_ctx_buffer[ATTRIBUTE].mem.size, | ||
189 | gmmu_page_size_kernel); | ||
190 | |||
191 | if (!gpu_va) | ||
192 | goto clean_up; | ||
193 | g_bfr_va[ATTRIBUTE_VA] = gpu_va; | ||
194 | g_bfr_size[ATTRIBUTE_VA] = gr->global_ctx_buffer[ATTRIBUTE].mem.size; | ||
195 | |||
196 | /* Page Pool */ | ||
197 | gpu_va = __nvgpu_vm_alloc_va(ch_vm, | ||
198 | gr->global_ctx_buffer[PAGEPOOL].mem.size, | ||
199 | gmmu_page_size_kernel); | ||
200 | if (!gpu_va) | ||
201 | goto clean_up; | ||
202 | g_bfr_va[PAGEPOOL_VA] = gpu_va; | ||
203 | g_bfr_size[PAGEPOOL_VA] = gr->global_ctx_buffer[PAGEPOOL].mem.size; | ||
204 | |||
205 | /* Priv register Access Map */ | ||
206 | gpu_va = __nvgpu_vm_alloc_va(ch_vm, | ||
207 | gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem.size, | ||
208 | gmmu_page_size_kernel); | ||
209 | if (!gpu_va) | ||
210 | goto clean_up; | ||
211 | g_bfr_va[PRIV_ACCESS_MAP_VA] = gpu_va; | ||
212 | g_bfr_size[PRIV_ACCESS_MAP_VA] = | ||
213 | gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem.size; | ||
214 | |||
215 | msg.cmd = TEGRA_VGPU_CMD_CHANNEL_MAP_GR_GLOBAL_CTX; | ||
216 | msg.handle = vgpu_get_handle(g); | ||
217 | p->handle = c->virt_ctx; | ||
218 | p->cb_va = g_bfr_va[CIRCULAR_VA]; | ||
219 | p->attr_va = g_bfr_va[ATTRIBUTE_VA]; | ||
220 | p->page_pool_va = g_bfr_va[PAGEPOOL_VA]; | ||
221 | p->priv_access_map_va = g_bfr_va[PRIV_ACCESS_MAP_VA]; | ||
222 | err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)); | ||
223 | if (err || msg.ret) | ||
224 | goto clean_up; | ||
225 | |||
226 | tsg->gr_ctx.global_ctx_buffer_mapped = true; | ||
227 | return 0; | ||
228 | |||
229 | clean_up: | ||
230 | for (i = 0; i < NR_GLOBAL_CTX_BUF_VA; i++) { | ||
231 | if (g_bfr_va[i]) { | ||
232 | __nvgpu_vm_free_va(ch_vm, g_bfr_va[i], | ||
233 | gmmu_page_size_kernel); | ||
234 | g_bfr_va[i] = 0; | ||
235 | } | ||
236 | } | ||
237 | return -ENOMEM; | ||
238 | } | ||
239 | |||
240 | static void vgpu_gr_unmap_global_ctx_buffers(struct tsg_gk20a *tsg) | ||
241 | { | ||
242 | struct vm_gk20a *ch_vm = tsg->vm; | ||
243 | u64 *g_bfr_va = tsg->gr_ctx.global_ctx_buffer_va; | ||
244 | u64 *g_bfr_size = tsg->gr_ctx.global_ctx_buffer_size; | ||
245 | u32 i; | ||
246 | |||
247 | gk20a_dbg_fn(""); | ||
248 | |||
249 | if (tsg->gr_ctx.global_ctx_buffer_mapped) { | ||
250 | /* server will unmap on channel close */ | ||
251 | |||
252 | for (i = 0; i < NR_GLOBAL_CTX_BUF_VA; i++) { | ||
253 | if (g_bfr_va[i]) { | ||
254 | __nvgpu_vm_free_va(ch_vm, g_bfr_va[i], | ||
255 | gmmu_page_size_kernel); | ||
256 | g_bfr_va[i] = 0; | ||
257 | g_bfr_size[i] = 0; | ||
258 | } | ||
259 | } | ||
260 | |||
261 | tsg->gr_ctx.global_ctx_buffer_mapped = false; | ||
262 | } | ||
263 | } | ||
264 | |||
265 | int vgpu_gr_alloc_gr_ctx(struct gk20a *g, | ||
266 | struct nvgpu_gr_ctx *gr_ctx, | ||
267 | struct vm_gk20a *vm, | ||
268 | u32 class, | ||
269 | u32 flags) | ||
270 | { | ||
271 | struct tegra_vgpu_cmd_msg msg = {0}; | ||
272 | struct tegra_vgpu_gr_ctx_params *p = &msg.params.gr_ctx; | ||
273 | struct gr_gk20a *gr = &g->gr; | ||
274 | int err; | ||
275 | |||
276 | gk20a_dbg_fn(""); | ||
277 | |||
278 | if (gr->ctx_vars.buffer_size == 0) | ||
279 | return 0; | ||
280 | |||
281 | /* alloc channel gr ctx buffer */ | ||
282 | gr->ctx_vars.buffer_size = gr->ctx_vars.golden_image_size; | ||
283 | gr->ctx_vars.buffer_total_size = gr->ctx_vars.golden_image_size; | ||
284 | |||
285 | gr_ctx->mem.gpu_va = __nvgpu_vm_alloc_va(vm, | ||
286 | gr->ctx_vars.buffer_total_size, | ||
287 | gmmu_page_size_kernel); | ||
288 | |||
289 | if (!gr_ctx->mem.gpu_va) | ||
290 | return -ENOMEM; | ||
291 | gr_ctx->mem.size = gr->ctx_vars.buffer_total_size; | ||
292 | gr_ctx->mem.aperture = APERTURE_SYSMEM; | ||
293 | |||
294 | msg.cmd = TEGRA_VGPU_CMD_GR_CTX_ALLOC; | ||
295 | msg.handle = vgpu_get_handle(g); | ||
296 | p->as_handle = vm->handle; | ||
297 | p->gr_ctx_va = gr_ctx->mem.gpu_va; | ||
298 | p->class_num = class; | ||
299 | err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)); | ||
300 | err = err ? err : msg.ret; | ||
301 | |||
302 | if (unlikely(err)) { | ||
303 | nvgpu_err(g, "fail to alloc gr_ctx"); | ||
304 | __nvgpu_vm_free_va(vm, gr_ctx->mem.gpu_va, | ||
305 | gmmu_page_size_kernel); | ||
306 | gr_ctx->mem.aperture = APERTURE_INVALID; | ||
307 | } else { | ||
308 | gr_ctx->virt_ctx = p->gr_ctx_handle; | ||
309 | } | ||
310 | |||
311 | return err; | ||
312 | } | ||
313 | |||
314 | static int vgpu_gr_alloc_channel_patch_ctx(struct gk20a *g, | ||
315 | struct channel_gk20a *c) | ||
316 | { | ||
317 | struct tsg_gk20a *tsg; | ||
318 | struct patch_desc *patch_ctx; | ||
319 | struct vm_gk20a *ch_vm = c->vm; | ||
320 | struct tegra_vgpu_cmd_msg msg; | ||
321 | struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx; | ||
322 | int err; | ||
323 | |||
324 | gk20a_dbg_fn(""); | ||
325 | |||
326 | tsg = tsg_gk20a_from_ch(c); | ||
327 | if (!tsg) | ||
328 | return -EINVAL; | ||
329 | |||
330 | patch_ctx = &tsg->gr_ctx.patch_ctx; | ||
331 | patch_ctx->mem.size = 128 * sizeof(u32); | ||
332 | patch_ctx->mem.gpu_va = __nvgpu_vm_alloc_va(ch_vm, | ||
333 | patch_ctx->mem.size, | ||
334 | gmmu_page_size_kernel); | ||
335 | if (!patch_ctx->mem.gpu_va) | ||
336 | return -ENOMEM; | ||
337 | |||
338 | msg.cmd = TEGRA_VGPU_CMD_CHANNEL_ALLOC_GR_PATCH_CTX; | ||
339 | msg.handle = vgpu_get_handle(g); | ||
340 | p->handle = c->virt_ctx; | ||
341 | p->patch_ctx_va = patch_ctx->mem.gpu_va; | ||
342 | err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)); | ||
343 | if (err || msg.ret) { | ||
344 | __nvgpu_vm_free_va(ch_vm, patch_ctx->mem.gpu_va, | ||
345 | gmmu_page_size_kernel); | ||
346 | err = -ENOMEM; | ||
347 | } | ||
348 | |||
349 | return err; | ||
350 | } | ||
351 | |||
352 | static void vgpu_gr_free_channel_patch_ctx(struct tsg_gk20a *tsg) | ||
353 | { | ||
354 | struct patch_desc *patch_ctx = &tsg->gr_ctx.patch_ctx; | ||
355 | |||
356 | gk20a_dbg_fn(""); | ||
357 | |||
358 | if (patch_ctx->mem.gpu_va) { | ||
359 | /* server will free on channel close */ | ||
360 | |||
361 | __nvgpu_vm_free_va(tsg->vm, patch_ctx->mem.gpu_va, | ||
362 | gmmu_page_size_kernel); | ||
363 | patch_ctx->mem.gpu_va = 0; | ||
364 | } | ||
365 | } | ||
366 | |||
367 | static void vgpu_gr_free_channel_pm_ctx(struct tsg_gk20a *tsg) | ||
368 | { | ||
369 | struct nvgpu_gr_ctx *ch_ctx = &tsg->gr_ctx; | ||
370 | struct pm_ctx_desc *pm_ctx = &ch_ctx->pm_ctx; | ||
371 | |||
372 | gk20a_dbg_fn(""); | ||
373 | |||
374 | /* check if hwpm was ever initialized. If not, nothing to do */ | ||
375 | if (pm_ctx->mem.gpu_va == 0) | ||
376 | return; | ||
377 | |||
378 | /* server will free on channel close */ | ||
379 | |||
380 | __nvgpu_vm_free_va(tsg->vm, pm_ctx->mem.gpu_va, | ||
381 | gmmu_page_size_kernel); | ||
382 | pm_ctx->mem.gpu_va = 0; | ||
383 | } | ||
384 | |||
385 | void vgpu_gr_free_gr_ctx(struct gk20a *g, | ||
386 | struct vm_gk20a *vm, struct nvgpu_gr_ctx *gr_ctx) | ||
387 | { | ||
388 | struct tsg_gk20a *tsg; | ||
389 | |||
390 | gk20a_dbg_fn(""); | ||
391 | |||
392 | if (gr_ctx->mem.gpu_va) { | ||
393 | struct tegra_vgpu_cmd_msg msg; | ||
394 | struct tegra_vgpu_gr_ctx_params *p = &msg.params.gr_ctx; | ||
395 | int err; | ||
396 | |||
397 | msg.cmd = TEGRA_VGPU_CMD_GR_CTX_FREE; | ||
398 | msg.handle = vgpu_get_handle(g); | ||
399 | p->gr_ctx_handle = gr_ctx->virt_ctx; | ||
400 | err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)); | ||
401 | WARN_ON(err || msg.ret); | ||
402 | |||
403 | __nvgpu_vm_free_va(vm, gr_ctx->mem.gpu_va, | ||
404 | gmmu_page_size_kernel); | ||
405 | |||
406 | tsg = &g->fifo.tsg[gr_ctx->tsgid]; | ||
407 | vgpu_gr_unmap_global_ctx_buffers(tsg); | ||
408 | vgpu_gr_free_channel_patch_ctx(tsg); | ||
409 | vgpu_gr_free_channel_pm_ctx(tsg); | ||
410 | |||
411 | nvgpu_dma_unmap_free(vm, &gr_ctx->pagepool_ctxsw_buffer); | ||
412 | nvgpu_dma_unmap_free(vm, &gr_ctx->betacb_ctxsw_buffer); | ||
413 | nvgpu_dma_unmap_free(vm, &gr_ctx->spill_ctxsw_buffer); | ||
414 | nvgpu_dma_unmap_free(vm, &gr_ctx->preempt_ctxsw_buffer); | ||
415 | |||
416 | memset(gr_ctx, 0, sizeof(*gr_ctx)); | ||
417 | } | ||
418 | } | ||
419 | |||
420 | static int vgpu_gr_ch_bind_gr_ctx(struct channel_gk20a *c) | ||
421 | { | ||
422 | struct tsg_gk20a *tsg; | ||
423 | struct nvgpu_gr_ctx *gr_ctx; | ||
424 | struct tegra_vgpu_cmd_msg msg = {0}; | ||
425 | struct tegra_vgpu_channel_bind_gr_ctx_params *p = | ||
426 | &msg.params.ch_bind_gr_ctx; | ||
427 | int err; | ||
428 | |||
429 | tsg = tsg_gk20a_from_ch(c); | ||
430 | if (!tsg) | ||
431 | return -EINVAL; | ||
432 | |||
433 | gr_ctx = &tsg->gr_ctx; | ||
434 | |||
435 | msg.cmd = TEGRA_VGPU_CMD_CHANNEL_BIND_GR_CTX; | ||
436 | msg.handle = vgpu_get_handle(c->g); | ||
437 | p->ch_handle = c->virt_ctx; | ||
438 | p->gr_ctx_handle = gr_ctx->virt_ctx; | ||
439 | err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)); | ||
440 | err = err ? err : msg.ret; | ||
441 | WARN_ON(err); | ||
442 | |||
443 | return err; | ||
444 | } | ||
445 | |||
446 | static int vgpu_gr_tsg_bind_gr_ctx(struct tsg_gk20a *tsg) | ||
447 | { | ||
448 | struct nvgpu_gr_ctx *gr_ctx = &tsg->gr_ctx; | ||
449 | struct tegra_vgpu_cmd_msg msg = {0}; | ||
450 | struct tegra_vgpu_tsg_bind_gr_ctx_params *p = | ||
451 | &msg.params.tsg_bind_gr_ctx; | ||
452 | int err; | ||
453 | |||
454 | msg.cmd = TEGRA_VGPU_CMD_TSG_BIND_GR_CTX; | ||
455 | msg.handle = vgpu_get_handle(tsg->g); | ||
456 | p->tsg_id = tsg->tsgid; | ||
457 | p->gr_ctx_handle = gr_ctx->virt_ctx; | ||
458 | err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)); | ||
459 | err = err ? err : msg.ret; | ||
460 | WARN_ON(err); | ||
461 | |||
462 | return err; | ||
463 | } | ||
464 | |||
465 | int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags) | ||
466 | { | ||
467 | struct gk20a *g = c->g; | ||
468 | struct fifo_gk20a *f = &g->fifo; | ||
469 | struct nvgpu_gr_ctx *gr_ctx = NULL; | ||
470 | struct tsg_gk20a *tsg = NULL; | ||
471 | int err = 0; | ||
472 | |||
473 | gk20a_dbg_fn(""); | ||
474 | |||
475 | /* an address space needs to have been bound at this point. */ | ||
476 | if (!gk20a_channel_as_bound(c)) { | ||
477 | nvgpu_err(g, "not bound to address space at time" | ||
478 | " of grctx allocation"); | ||
479 | return -EINVAL; | ||
480 | } | ||
481 | |||
482 | if (!g->ops.gr.is_valid_class(g, class_num)) { | ||
483 | nvgpu_err(g, "invalid obj class 0x%x", class_num); | ||
484 | err = -EINVAL; | ||
485 | goto out; | ||
486 | } | ||
487 | c->obj_class = class_num; | ||
488 | |||
489 | if (!gk20a_is_channel_marked_as_tsg(c)) | ||
490 | return -EINVAL; | ||
491 | |||
492 | tsg = &f->tsg[c->tsgid]; | ||
493 | gr_ctx = &tsg->gr_ctx; | ||
494 | |||
495 | if (!nvgpu_mem_is_valid(&gr_ctx->mem)) { | ||
496 | tsg->vm = c->vm; | ||
497 | nvgpu_vm_get(tsg->vm); | ||
498 | err = g->ops.gr.alloc_gr_ctx(g, gr_ctx, | ||
499 | c->vm, | ||
500 | class_num, | ||
501 | flags); | ||
502 | if (!err) { | ||
503 | gr_ctx->tsgid = tsg->tsgid; | ||
504 | err = vgpu_gr_tsg_bind_gr_ctx(tsg); | ||
505 | } | ||
506 | if (err) { | ||
507 | nvgpu_err(g, | ||
508 | "fail to allocate TSG gr ctx buffer, err=%d", err); | ||
509 | nvgpu_vm_put(tsg->vm); | ||
510 | tsg->vm = NULL; | ||
511 | goto out; | ||
512 | } | ||
513 | |||
514 | err = vgpu_gr_ch_bind_gr_ctx(c); | ||
515 | if (err) { | ||
516 | nvgpu_err(g, "fail to bind gr ctx buffer"); | ||
517 | goto out; | ||
518 | } | ||
519 | |||
520 | /* commit gr ctx buffer */ | ||
521 | err = g->ops.gr.commit_inst(c, gr_ctx->mem.gpu_va); | ||
522 | if (err) { | ||
523 | nvgpu_err(g, "fail to commit gr ctx buffer"); | ||
524 | goto out; | ||
525 | } | ||
526 | |||
527 | /* allocate patch buffer */ | ||
528 | err = vgpu_gr_alloc_channel_patch_ctx(g, c); | ||
529 | if (err) { | ||
530 | nvgpu_err(g, "fail to allocate patch buffer"); | ||
531 | goto out; | ||
532 | } | ||
533 | |||
534 | /* map global buffer to channel gpu_va and commit */ | ||
535 | err = vgpu_gr_map_global_ctx_buffers(g, c); | ||
536 | if (err) { | ||
537 | nvgpu_err(g, "fail to map global ctx buffer"); | ||
538 | goto out; | ||
539 | } | ||
540 | |||
541 | err = vgpu_gr_commit_global_ctx_buffers(g, c, true); | ||
542 | if (err) { | ||
543 | nvgpu_err(g, "fail to commit global ctx buffers"); | ||
544 | goto out; | ||
545 | } | ||
546 | |||
547 | /* load golden image */ | ||
548 | err = gr_gk20a_elpg_protected_call(g, | ||
549 | vgpu_gr_load_golden_ctx_image(g, c)); | ||
550 | if (err) { | ||
551 | nvgpu_err(g, "fail to load golden ctx image"); | ||
552 | goto out; | ||
553 | } | ||
554 | } else { | ||
555 | err = vgpu_gr_ch_bind_gr_ctx(c); | ||
556 | if (err) { | ||
557 | nvgpu_err(g, "fail to bind gr ctx buffer"); | ||
558 | goto out; | ||
559 | } | ||
560 | |||
561 | /* commit gr ctx buffer */ | ||
562 | err = g->ops.gr.commit_inst(c, gr_ctx->mem.gpu_va); | ||
563 | if (err) { | ||
564 | nvgpu_err(g, "fail to commit gr ctx buffer"); | ||
565 | goto out; | ||
566 | } | ||
567 | } | ||
568 | |||
569 | /* PM context switch is off by default */ | ||
570 | gr_ctx->pm_ctx.pm_mode = ctxsw_prog_main_image_pm_mode_no_ctxsw_f(); | ||
571 | |||
572 | gk20a_dbg_fn("done"); | ||
573 | return 0; | ||
574 | out: | ||
575 | /* 1. gr_ctx, patch_ctx and the global ctx buffer mappings | ||
576 | can be reused, so there is no need to release them. | ||
577 | 2. golden image load is a one-time thing, so if it | ||
578 | passed, there is nothing to undo. */ | ||
579 | nvgpu_err(g, "fail"); | ||
580 | return err; | ||
581 | } | ||
582 | |||
583 | static int vgpu_gr_init_gr_config(struct gk20a *g, struct gr_gk20a *gr) | ||
584 | { | ||
585 | struct vgpu_priv_data *priv = vgpu_get_priv_data(g); | ||
586 | u32 gpc_index; | ||
587 | u32 sm_per_tpc; | ||
588 | int err = -ENOMEM; | ||
589 | |||
590 | gk20a_dbg_fn(""); | ||
591 | |||
592 | gr->max_gpc_count = priv->constants.max_gpc_count; | ||
593 | gr->gpc_count = priv->constants.gpc_count; | ||
594 | gr->max_tpc_per_gpc_count = priv->constants.max_tpc_per_gpc_count; | ||
595 | |||
596 | gr->max_tpc_count = gr->max_gpc_count * gr->max_tpc_per_gpc_count; | ||
597 | |||
598 | gr->gpc_tpc_count = nvgpu_kzalloc(g, gr->gpc_count * sizeof(u32)); | ||
599 | if (!gr->gpc_tpc_count) | ||
600 | goto cleanup; | ||
601 | |||
602 | gr->gpc_tpc_mask = nvgpu_kzalloc(g, gr->gpc_count * sizeof(u32)); | ||
603 | if (!gr->gpc_tpc_mask) | ||
604 | goto cleanup; | ||
605 | |||
606 | sm_per_tpc = priv->constants.sm_per_tpc; | ||
607 | gr->sm_to_cluster = nvgpu_kzalloc(g, gr->gpc_count * | ||
608 | gr->max_tpc_per_gpc_count * | ||
609 | sm_per_tpc * | ||
610 | sizeof(struct sm_info)); | ||
611 | if (!gr->sm_to_cluster) | ||
612 | goto cleanup; | ||
613 | |||
614 | gr->tpc_count = 0; | ||
615 | for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) { | ||
616 | gr->gpc_tpc_count[gpc_index] = | ||
617 | priv->constants.gpc_tpc_count[gpc_index]; | ||
618 | |||
619 | gr->tpc_count += gr->gpc_tpc_count[gpc_index]; | ||
620 | |||
621 | if (g->ops.gr.get_gpc_tpc_mask) | ||
622 | gr->gpc_tpc_mask[gpc_index] = | ||
623 | g->ops.gr.get_gpc_tpc_mask(g, gpc_index); | ||
624 | } | ||
625 | |||
626 | g->ops.gr.bundle_cb_defaults(g); | ||
627 | g->ops.gr.cb_size_default(g); | ||
628 | g->ops.gr.calc_global_ctx_buffer_size(g); | ||
629 | err = g->ops.gr.init_fs_state(g); | ||
630 | if (err) | ||
631 | goto cleanup; | ||
632 | return 0; | ||
633 | cleanup: | ||
634 | nvgpu_err(g, "out of memory"); | ||
635 | |||
636 | nvgpu_kfree(g, gr->gpc_tpc_count); | ||
637 | gr->gpc_tpc_count = NULL; | ||
638 | |||
639 | nvgpu_kfree(g, gr->gpc_tpc_mask); | ||
640 | gr->gpc_tpc_mask = NULL; | ||
641 | |||
642 | return err; | ||
643 | } | ||
644 | |||
645 | int vgpu_gr_bind_ctxsw_zcull(struct gk20a *g, struct gr_gk20a *gr, | ||
646 | struct channel_gk20a *c, u64 zcull_va, | ||
647 | u32 mode) | ||
648 | { | ||
649 | struct tegra_vgpu_cmd_msg msg; | ||
650 | struct tegra_vgpu_zcull_bind_params *p = &msg.params.zcull_bind; | ||
651 | int err; | ||
652 | |||
653 | gk20a_dbg_fn(""); | ||
654 | |||
655 | msg.cmd = TEGRA_VGPU_CMD_CHANNEL_BIND_ZCULL; | ||
656 | msg.handle = vgpu_get_handle(g); | ||
657 | p->handle = c->virt_ctx; | ||
658 | p->zcull_va = zcull_va; | ||
659 | p->mode = mode; | ||
660 | err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)); | ||
661 | |||
662 | return (err || msg.ret) ? -ENOMEM : 0; | ||
663 | } | ||
664 | |||
665 | int vgpu_gr_get_zcull_info(struct gk20a *g, struct gr_gk20a *gr, | ||
666 | struct gr_zcull_info *zcull_params) | ||
667 | { | ||
668 | struct tegra_vgpu_cmd_msg msg; | ||
669 | struct tegra_vgpu_zcull_info_params *p = &msg.params.zcull_info; | ||
670 | int err; | ||
671 | |||
672 | gk20a_dbg_fn(""); | ||
673 | |||
674 | msg.cmd = TEGRA_VGPU_CMD_GET_ZCULL_INFO; | ||
675 | msg.handle = vgpu_get_handle(g); | ||
676 | err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)); | ||
677 | if (err || msg.ret) | ||
678 | return -ENOMEM; | ||
679 | |||
680 | zcull_params->width_align_pixels = p->width_align_pixels; | ||
681 | zcull_params->height_align_pixels = p->height_align_pixels; | ||
682 | zcull_params->pixel_squares_by_aliquots = p->pixel_squares_by_aliquots; | ||
683 | zcull_params->aliquot_total = p->aliquot_total; | ||
684 | zcull_params->region_byte_multiplier = p->region_byte_multiplier; | ||
685 | zcull_params->region_header_size = p->region_header_size; | ||
686 | zcull_params->subregion_header_size = p->subregion_header_size; | ||
687 | zcull_params->subregion_width_align_pixels = | ||
688 | p->subregion_width_align_pixels; | ||
689 | zcull_params->subregion_height_align_pixels = | ||
690 | p->subregion_height_align_pixels; | ||
691 | zcull_params->subregion_count = p->subregion_count; | ||
692 | |||
693 | return 0; | ||
694 | } | ||
695 | |||
696 | u32 vgpu_gr_get_gpc_tpc_mask(struct gk20a *g, u32 gpc_index) | ||
697 | { | ||
698 | struct vgpu_priv_data *priv = vgpu_get_priv_data(g); | ||
699 | |||
700 | return priv->constants.gpc_tpc_mask[gpc_index]; | ||
701 | } | ||
702 | |||
703 | u32 vgpu_gr_get_max_fbps_count(struct gk20a *g) | ||
704 | { | ||
705 | struct vgpu_priv_data *priv = vgpu_get_priv_data(g); | ||
706 | |||
707 | gk20a_dbg_fn(""); | ||
708 | |||
709 | return priv->constants.num_fbps; | ||
710 | } | ||
711 | |||
712 | u32 vgpu_gr_get_fbp_en_mask(struct gk20a *g) | ||
713 | { | ||
714 | struct vgpu_priv_data *priv = vgpu_get_priv_data(g); | ||
715 | |||
716 | gk20a_dbg_fn(""); | ||
717 | |||
718 | return priv->constants.fbp_en_mask; | ||
719 | } | ||
720 | |||
721 | u32 vgpu_gr_get_max_ltc_per_fbp(struct gk20a *g) | ||
722 | { | ||
723 | struct vgpu_priv_data *priv = vgpu_get_priv_data(g); | ||
724 | |||
725 | gk20a_dbg_fn(""); | ||
726 | |||
727 | return priv->constants.ltc_per_fbp; | ||
728 | } | ||
729 | |||
730 | u32 vgpu_gr_get_max_lts_per_ltc(struct gk20a *g) | ||
731 | { | ||
732 | struct vgpu_priv_data *priv = vgpu_get_priv_data(g); | ||
733 | |||
734 | gk20a_dbg_fn(""); | ||
735 | |||
736 | return priv->constants.max_lts_per_ltc; | ||
737 | } | ||
738 | |||
739 | u32 *vgpu_gr_rop_l2_en_mask(struct gk20a *g) | ||
740 | { | ||
741 | /* no one uses it yet */ | ||
742 | return NULL; | ||
743 | } | ||
744 | |||
745 | int vgpu_gr_add_zbc(struct gk20a *g, struct gr_gk20a *gr, | ||
746 | struct zbc_entry *zbc_val) | ||
747 | { | ||
748 | struct tegra_vgpu_cmd_msg msg = {0}; | ||
749 | struct tegra_vgpu_zbc_set_table_params *p = &msg.params.zbc_set_table; | ||
750 | int err; | ||
751 | |||
752 | gk20a_dbg_fn(""); | ||
753 | |||
754 | msg.cmd = TEGRA_VGPU_CMD_ZBC_SET_TABLE; | ||
755 | msg.handle = vgpu_get_handle(g); | ||
756 | |||
757 | p->type = zbc_val->type; | ||
758 | p->format = zbc_val->format; | ||
759 | switch (p->type) { | ||
760 | case GK20A_ZBC_TYPE_COLOR: | ||
761 | memcpy(p->color_ds, zbc_val->color_ds, sizeof(p->color_ds)); | ||
762 | memcpy(p->color_l2, zbc_val->color_l2, sizeof(p->color_l2)); | ||
763 | break; | ||
764 | case GK20A_ZBC_TYPE_DEPTH: | ||
765 | p->depth = zbc_val->depth; | ||
766 | break; | ||
767 | default: | ||
768 | return -EINVAL; | ||
769 | } | ||
770 | |||
771 | err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)); | ||
772 | |||
773 | return (err || msg.ret) ? -ENOMEM : 0; | ||
774 | } | ||
775 | |||
776 | int vgpu_gr_query_zbc(struct gk20a *g, struct gr_gk20a *gr, | ||
777 | struct zbc_query_params *query_params) | ||
778 | { | ||
779 | struct tegra_vgpu_cmd_msg msg = {0}; | ||
780 | struct tegra_vgpu_zbc_query_table_params *p = | ||
781 | &msg.params.zbc_query_table; | ||
782 | int err; | ||
783 | |||
784 | gk20a_dbg_fn(""); | ||
785 | |||
786 | msg.cmd = TEGRA_VGPU_CMD_ZBC_QUERY_TABLE; | ||
787 | msg.handle = vgpu_get_handle(g); | ||
788 | |||
789 | p->type = query_params->type; | ||
790 | p->index_size = query_params->index_size; | ||
791 | |||
792 | err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)); | ||
793 | if (err || msg.ret) | ||
794 | return -ENOMEM; | ||
795 | |||
796 | switch (query_params->type) { | ||
797 | case GK20A_ZBC_TYPE_COLOR: | ||
798 | memcpy(query_params->color_ds, p->color_ds, | ||
799 | sizeof(query_params->color_ds)); | ||
800 | memcpy(query_params->color_l2, p->color_l2, | ||
801 | sizeof(query_params->color_l2)); | ||
802 | break; | ||
803 | case GK20A_ZBC_TYPE_DEPTH: | ||
804 | query_params->depth = p->depth; | ||
805 | break; | ||
806 | case GK20A_ZBC_TYPE_INVALID: | ||
807 | query_params->index_size = p->index_size; | ||
808 | break; | ||
809 | default: | ||
810 | return -EINVAL; | ||
811 | } | ||
812 | query_params->ref_cnt = p->ref_cnt; | ||
813 | query_params->format = p->format; | ||
814 | |||
815 | return 0; | ||
816 | } | ||
817 | |||
818 | static void vgpu_remove_gr_support(struct gr_gk20a *gr) | ||
819 | { | ||
820 | gk20a_dbg_fn(""); | ||
821 | |||
822 | gk20a_comptag_allocator_destroy(gr->g, &gr->comp_tags); | ||
823 | |||
824 | nvgpu_kfree(gr->g, gr->sm_error_states); | ||
825 | gr->sm_error_states = NULL; | ||
826 | |||
827 | nvgpu_kfree(gr->g, gr->gpc_tpc_mask); | ||
828 | gr->gpc_tpc_mask = NULL; | ||
829 | |||
830 | nvgpu_kfree(gr->g, gr->sm_to_cluster); | ||
831 | gr->sm_to_cluster = NULL; | ||
832 | |||
833 | nvgpu_kfree(gr->g, gr->gpc_tpc_count); | ||
834 | gr->gpc_tpc_count = NULL; | ||
835 | } | ||
836 | |||
837 | static int vgpu_gr_init_gr_setup_sw(struct gk20a *g) | ||
838 | { | ||
839 | struct gr_gk20a *gr = &g->gr; | ||
840 | int err; | ||
841 | |||
842 | gk20a_dbg_fn(""); | ||
843 | |||
844 | if (gr->sw_ready) { | ||
845 | gk20a_dbg_fn("skip init"); | ||
846 | return 0; | ||
847 | } | ||
848 | |||
849 | gr->g = g; | ||
850 | |||
851 | #if defined(CONFIG_GK20A_CYCLE_STATS) | ||
852 | nvgpu_mutex_init(&g->gr.cs_lock); | ||
853 | #endif | ||
854 | |||
855 | err = vgpu_gr_init_gr_config(g, gr); | ||
856 | if (err) | ||
857 | goto clean_up; | ||
858 | |||
859 | err = g->ops.gr.init_ctx_state(g); | ||
860 | if (err) | ||
861 | goto clean_up; | ||
862 | |||
863 | err = g->ops.ltc.init_comptags(g, gr); | ||
864 | if (err) | ||
865 | goto clean_up; | ||
866 | |||
867 | err = vgpu_gr_alloc_global_ctx_buffers(g); | ||
868 | if (err) | ||
869 | goto clean_up; | ||
870 | |||
871 | nvgpu_mutex_init(&gr->ctx_mutex); | ||
872 | |||
873 | gr->sm_error_states = nvgpu_kzalloc(g, | ||
874 | sizeof(struct nvgpu_gr_sm_error_state) * | ||
875 | gr->no_of_sm); | ||
876 | if (!gr->sm_error_states) { | ||
877 | err = -ENOMEM; | ||
878 | goto clean_up; | ||
879 | } | ||
880 | |||
881 | gr->remove_support = vgpu_remove_gr_support; | ||
882 | gr->sw_ready = true; | ||
883 | |||
884 | gk20a_dbg_fn("done"); | ||
885 | return 0; | ||
886 | |||
887 | clean_up: | ||
888 | nvgpu_err(g, "fail"); | ||
889 | vgpu_remove_gr_support(gr); | ||
890 | return err; | ||
891 | } | ||
892 | |||
893 | int vgpu_init_gr_support(struct gk20a *g) | ||
894 | { | ||
895 | gk20a_dbg_fn(""); | ||
896 | |||
897 | return vgpu_gr_init_gr_setup_sw(g); | ||
898 | } | ||
899 | |||
900 | int vgpu_gr_isr(struct gk20a *g, struct tegra_vgpu_gr_intr_info *info) | ||
901 | { | ||
902 | struct fifo_gk20a *f = &g->fifo; | ||
903 | struct channel_gk20a *ch = gk20a_channel_get(&f->channel[info->chid]); | ||
904 | |||
905 | gk20a_dbg_fn(""); | ||
906 | if (!ch) | ||
907 | return 0; | ||
908 | |||
909 | if (info->type != TEGRA_VGPU_GR_INTR_NOTIFY && | ||
910 | info->type != TEGRA_VGPU_GR_INTR_SEMAPHORE) | ||
911 | nvgpu_err(g, "gr intr (%d) on ch %u", info->type, info->chid); | ||
912 | |||
913 | switch (info->type) { | ||
914 | case TEGRA_VGPU_GR_INTR_NOTIFY: | ||
915 | nvgpu_cond_broadcast_interruptible(&ch->notifier_wq); | ||
916 | break; | ||
917 | case TEGRA_VGPU_GR_INTR_SEMAPHORE: | ||
918 | nvgpu_cond_broadcast_interruptible(&ch->semaphore_wq); | ||
919 | break; | ||
920 | case TEGRA_VGPU_GR_INTR_SEMAPHORE_TIMEOUT: | ||
921 | nvgpu_set_error_notifier(ch, | ||
922 | NVGPU_ERR_NOTIFIER_GR_SEMAPHORE_TIMEOUT); | ||
923 | break; | ||
924 | case TEGRA_VGPU_GR_INTR_ILLEGAL_NOTIFY: | ||
925 | nvgpu_set_error_notifier(ch, | ||
926 | NVGPU_ERR_NOTIFIER_GR_ILLEGAL_NOTIFY); | ||
| break; | ||
927 | case TEGRA_VGPU_GR_INTR_ILLEGAL_METHOD: | ||
928 | break; | ||
929 | case TEGRA_VGPU_GR_INTR_ILLEGAL_CLASS: | ||
930 | nvgpu_set_error_notifier(ch, | ||
931 | NVGPU_ERR_NOTIFIER_GR_ERROR_SW_NOTIFY); | ||
932 | break; | ||
933 | case TEGRA_VGPU_GR_INTR_FECS_ERROR: | ||
934 | break; | ||
935 | case TEGRA_VGPU_GR_INTR_CLASS_ERROR: | ||
936 | nvgpu_set_error_notifier(ch, | ||
937 | NVGPU_ERR_NOTIFIER_GR_ERROR_SW_NOTIFY); | ||
938 | break; | ||
939 | case TEGRA_VGPU_GR_INTR_FIRMWARE_METHOD: | ||
940 | nvgpu_set_error_notifier(ch, | ||
941 | NVGPU_ERR_NOTIFIER_GR_ERROR_SW_NOTIFY); | ||
942 | break; | ||
943 | case TEGRA_VGPU_GR_INTR_EXCEPTION: | ||
944 | nvgpu_set_error_notifier(ch, | ||
945 | NVGPU_ERR_NOTIFIER_GR_ERROR_SW_NOTIFY); | ||
946 | break; | ||
947 | case TEGRA_VGPU_GR_INTR_SM_EXCEPTION: | ||
948 | gk20a_dbg_gpu_post_events(ch); | ||
949 | break; | ||
950 | default: | ||
951 | WARN_ON(1); | ||
952 | break; | ||
953 | } | ||
954 | |||
955 | gk20a_channel_put(ch); | ||
956 | return 0; | ||
957 | } | ||
958 | |||
959 | int vgpu_gr_nonstall_isr(struct gk20a *g, | ||
960 | struct tegra_vgpu_gr_nonstall_intr_info *info) | ||
961 | { | ||
962 | gk20a_dbg_fn(""); | ||
963 | |||
964 | switch (info->type) { | ||
965 | case TEGRA_VGPU_GR_NONSTALL_INTR_SEMAPHORE: | ||
966 | gk20a_channel_semaphore_wakeup(g, true); | ||
967 | break; | ||
968 | default: | ||
969 | WARN_ON(1); | ||
970 | break; | ||
971 | } | ||
972 | |||
973 | return 0; | ||
974 | } | ||
975 | |||
976 | int vgpu_gr_set_sm_debug_mode(struct gk20a *g, | ||
977 | struct channel_gk20a *ch, u64 sms, bool enable) | ||
978 | { | ||
979 | struct tegra_vgpu_cmd_msg msg; | ||
980 | struct tegra_vgpu_sm_debug_mode *p = &msg.params.sm_debug_mode; | ||
981 | int err; | ||
982 | |||
983 | gk20a_dbg_fn(""); | ||
984 | |||
985 | msg.cmd = TEGRA_VGPU_CMD_SET_SM_DEBUG_MODE; | ||
986 | msg.handle = vgpu_get_handle(g); | ||
987 | p->handle = ch->virt_ctx; | ||
988 | p->sms = sms; | ||
989 | p->enable = (u32)enable; | ||
990 | err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)); | ||
991 | WARN_ON(err || msg.ret); | ||
992 | |||
993 | return err ? err : msg.ret; | ||
994 | } | ||
995 | |||
996 | int vgpu_gr_update_smpc_ctxsw_mode(struct gk20a *g, | ||
997 | struct channel_gk20a *ch, bool enable) | ||
998 | { | ||
999 | struct tegra_vgpu_cmd_msg msg; | ||
1000 | struct tegra_vgpu_channel_set_ctxsw_mode *p = &msg.params.set_ctxsw_mode; | ||
1001 | int err; | ||
1002 | |||
1003 | gk20a_dbg_fn(""); | ||
1004 | |||
1005 | msg.cmd = TEGRA_VGPU_CMD_CHANNEL_SET_SMPC_CTXSW_MODE; | ||
1006 | msg.handle = vgpu_get_handle(g); | ||
1007 | p->handle = ch->virt_ctx; | ||
1008 | |||
1009 | if (enable) | ||
1010 | p->mode = TEGRA_VGPU_CTXSW_MODE_CTXSW; | ||
1011 | else | ||
1012 | p->mode = TEGRA_VGPU_CTXSW_MODE_NO_CTXSW; | ||
1013 | |||
1014 | err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)); | ||
1015 | WARN_ON(err || msg.ret); | ||
1016 | |||
1017 | return err ? err : msg.ret; | ||
1018 | } | ||
1019 | |||
1020 | int vgpu_gr_update_hwpm_ctxsw_mode(struct gk20a *g, | ||
1021 | struct channel_gk20a *ch, bool enable) | ||
1022 | { | ||
1023 | struct tsg_gk20a *tsg; | ||
1024 | struct nvgpu_gr_ctx *ch_ctx; | ||
1025 | struct pm_ctx_desc *pm_ctx; | ||
1026 | struct tegra_vgpu_cmd_msg msg; | ||
1027 | struct tegra_vgpu_channel_set_ctxsw_mode *p = &msg.params.set_ctxsw_mode; | ||
1028 | int err; | ||
1029 | |||
1030 | gk20a_dbg_fn(""); | ||
1031 | |||
1032 | tsg = tsg_gk20a_from_ch(ch); | ||
1033 | if (!tsg) | ||
1034 | return -EINVAL; | ||
1035 | |||
1036 | ch_ctx = &tsg->gr_ctx; | ||
1037 | pm_ctx = &ch_ctx->pm_ctx; | ||
1038 | |||
1039 | if (enable) { | ||
1040 | /* | ||
1041 | * Send the command to enable HWPM only once - otherwise the server | ||
1042 | * will return an error because the same GPU VA would be used twice. | ||
1043 | */ | ||
1044 | if (pm_ctx->pm_mode == ctxsw_prog_main_image_pm_mode_ctxsw_f()) | ||
1045 | return 0; | ||
1046 | |||
1047 | p->mode = TEGRA_VGPU_CTXSW_MODE_CTXSW; | ||
1048 | |||
1049 | /* Allocate buffer if necessary */ | ||
1050 | if (pm_ctx->mem.gpu_va == 0) { | ||
1051 | pm_ctx->mem.gpu_va = __nvgpu_vm_alloc_va(ch->vm, | ||
1052 | g->gr.ctx_vars.pm_ctxsw_image_size, | ||
1053 | gmmu_page_size_kernel); | ||
1054 | |||
1055 | if (!pm_ctx->mem.gpu_va) | ||
1056 | return -ENOMEM; | ||
1057 | pm_ctx->mem.size = g->gr.ctx_vars.pm_ctxsw_image_size; | ||
1058 | } | ||
1059 | } else { | ||
1060 | if (pm_ctx->pm_mode == ctxsw_prog_main_image_pm_mode_no_ctxsw_f()) | ||
1061 | return 0; | ||
1062 | |||
1063 | p->mode = TEGRA_VGPU_CTXSW_MODE_NO_CTXSW; | ||
1064 | } | ||
1065 | |||
1066 | msg.cmd = TEGRA_VGPU_CMD_CHANNEL_SET_HWPM_CTXSW_MODE; | ||
1067 | msg.handle = vgpu_get_handle(g); | ||
1068 | p->handle = ch->virt_ctx; | ||
1069 | p->gpu_va = pm_ctx->mem.gpu_va; | ||
1070 | |||
1071 | err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)); | ||
1072 | WARN_ON(err || msg.ret); | ||
1073 | err = err ? err : msg.ret; | ||
1074 | if (!err) | ||
1075 | pm_ctx->pm_mode = enable ? | ||
1076 | ctxsw_prog_main_image_pm_mode_ctxsw_f() : | ||
1077 | ctxsw_prog_main_image_pm_mode_no_ctxsw_f(); | ||
1078 | |||
1079 | return err; | ||
1080 | } | ||
1081 | |||
1082 | int vgpu_gr_clear_sm_error_state(struct gk20a *g, | ||
1083 | struct channel_gk20a *ch, u32 sm_id) | ||
1084 | { | ||
1085 | struct gr_gk20a *gr = &g->gr; | ||
1086 | struct tegra_vgpu_cmd_msg msg; | ||
1087 | struct tegra_vgpu_clear_sm_error_state *p = | ||
1088 | &msg.params.clear_sm_error_state; | ||
1089 | int err; | ||
1090 | |||
1091 | nvgpu_mutex_acquire(&g->dbg_sessions_lock); | ||
1092 | msg.cmd = TEGRA_VGPU_CMD_CLEAR_SM_ERROR_STATE; | ||
1093 | msg.handle = vgpu_get_handle(g); | ||
1094 | p->handle = ch->virt_ctx; | ||
1095 | p->sm_id = sm_id; | ||
1096 | |||
1097 | err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)); | ||
1098 | WARN_ON(err || msg.ret); | ||
1099 | |||
1100 | memset(&gr->sm_error_states[sm_id], 0, sizeof(*gr->sm_error_states)); | ||
1101 | nvgpu_mutex_release(&g->dbg_sessions_lock); | ||
1102 | |||
1103 | return err ? err : msg.ret; | ||
1104 | |||
1105 | |||
1106 | return 0; | ||
1107 | } | ||
1108 | |||
1109 | static int vgpu_gr_suspend_resume_contexts(struct gk20a *g, | ||
1110 | struct dbg_session_gk20a *dbg_s, | ||
1111 | int *ctx_resident_ch_fd, u32 cmd) | ||
1112 | { | ||
1113 | struct dbg_session_channel_data *ch_data; | ||
1114 | struct tegra_vgpu_cmd_msg msg; | ||
1115 | struct tegra_vgpu_suspend_resume_contexts *p; | ||
1116 | size_t n; | ||
1117 | int channel_fd = -1; | ||
1118 | int err = 0; | ||
1119 | void *handle = NULL; | ||
1120 | u16 *oob; | ||
1121 | size_t oob_size; | ||
1122 | |||
1123 | nvgpu_mutex_acquire(&g->dbg_sessions_lock); | ||
1124 | nvgpu_mutex_acquire(&dbg_s->ch_list_lock); | ||
1125 | |||
1126 | handle = vgpu_ivc_oob_get_ptr(vgpu_ivc_get_server_vmid(), | ||
1127 | TEGRA_VGPU_QUEUE_CMD, | ||
1128 | (void **)&oob, &oob_size); | ||
1129 | if (!handle) { | ||
1130 | err = -EINVAL; | ||
1131 | goto done; | ||
1132 | } | ||
1133 | |||
1134 | n = 0; | ||
1135 | nvgpu_list_for_each_entry(ch_data, &dbg_s->ch_list, | ||
1136 | dbg_session_channel_data, ch_entry) | ||
1137 | n++; | ||
1138 | |||
1139 | if (oob_size < n * sizeof(u16)) { | ||
1140 | err = -ENOMEM; | ||
1141 | goto done; | ||
1142 | } | ||
1143 | |||
1144 | msg.cmd = cmd; | ||
1145 | msg.handle = vgpu_get_handle(g); | ||
1146 | p = &msg.params.suspend_contexts; | ||
1147 | p->num_channels = n; | ||
1148 | n = 0; | ||
1149 | nvgpu_list_for_each_entry(ch_data, &dbg_s->ch_list, | ||
1150 | dbg_session_channel_data, ch_entry) | ||
1151 | oob[n++] = (u16)ch_data->chid; | ||
1152 | |||
1153 | err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)); | ||
1154 | if (err || msg.ret) { | ||
1155 | err = -ENOMEM; | ||
1156 | goto done; | ||
1157 | } | ||
1158 | |||
1159 | if (p->resident_chid != (u16)~0) { | ||
1160 | nvgpu_list_for_each_entry(ch_data, &dbg_s->ch_list, | ||
1161 | dbg_session_channel_data, ch_entry) { | ||
1162 | if (ch_data->chid == p->resident_chid) { | ||
1163 | channel_fd = ch_data->channel_fd; | ||
1164 | break; | ||
1165 | } | ||
1166 | } | ||
1167 | } | ||
1168 | |||
1169 | done: | ||
1170 | if (handle) | ||
1171 | vgpu_ivc_oob_put_ptr(handle); | ||
1172 | nvgpu_mutex_release(&dbg_s->ch_list_lock); | ||
1173 | nvgpu_mutex_release(&g->dbg_sessions_lock); | ||
1174 | *ctx_resident_ch_fd = channel_fd; | ||
1175 | return err; | ||
1176 | } | ||
1177 | |||
1178 | int vgpu_gr_suspend_contexts(struct gk20a *g, | ||
1179 | struct dbg_session_gk20a *dbg_s, | ||
1180 | int *ctx_resident_ch_fd) | ||
1181 | { | ||
1182 | return vgpu_gr_suspend_resume_contexts(g, dbg_s, | ||
1183 | ctx_resident_ch_fd, TEGRA_VGPU_CMD_SUSPEND_CONTEXTS); | ||
1184 | } | ||
1185 | |||
1186 | int vgpu_gr_resume_contexts(struct gk20a *g, | ||
1187 | struct dbg_session_gk20a *dbg_s, | ||
1188 | int *ctx_resident_ch_fd) | ||
1189 | { | ||
1190 | return vgpu_gr_suspend_resume_contexts(g, dbg_s, | ||
1191 | ctx_resident_ch_fd, TEGRA_VGPU_CMD_RESUME_CONTEXTS); | ||
1192 | } | ||
1193 | |||
1194 | void vgpu_gr_handle_sm_esr_event(struct gk20a *g, | ||
1195 | struct tegra_vgpu_sm_esr_info *info) | ||
1196 | { | ||
1197 | struct nvgpu_gr_sm_error_state *sm_error_states; | ||
1198 | |||
1199 | if (info->sm_id >= g->gr.no_of_sm) { | ||
1200 | nvgpu_err(g, "invalid sm_id %d / %d", | ||
1201 | info->sm_id, g->gr.no_of_sm); | ||
1202 | return; | ||
1203 | } | ||
1204 | |||
1205 | nvgpu_mutex_acquire(&g->dbg_sessions_lock); | ||
1206 | |||
1207 | sm_error_states = &g->gr.sm_error_states[info->sm_id]; | ||
1208 | |||
1209 | sm_error_states->hww_global_esr = info->hww_global_esr; | ||
1210 | sm_error_states->hww_warp_esr = info->hww_warp_esr; | ||
1211 | sm_error_states->hww_warp_esr_pc = info->hww_warp_esr_pc; | ||
1212 | sm_error_states->hww_global_esr_report_mask = | ||
1213 | info->hww_global_esr_report_mask; | ||
1214 | sm_error_states->hww_warp_esr_report_mask = | ||
1215 | info->hww_warp_esr_report_mask; | ||
1216 | |||
1217 | nvgpu_mutex_release(&g->dbg_sessions_lock); | ||
1218 | } | ||
1219 | |||
1220 | int vgpu_gr_init_sm_id_table(struct gk20a *g) | ||
1221 | { | ||
1222 | struct tegra_vgpu_cmd_msg msg = {}; | ||
1223 | struct tegra_vgpu_vsms_mapping_params *p = &msg.params.vsms_mapping; | ||
1224 | struct tegra_vgpu_vsms_mapping_entry *entry; | ||
1225 | struct vgpu_priv_data *priv = vgpu_get_priv_data(g); | ||
1226 | struct sm_info *sm_info; | ||
1227 | int err; | ||
1228 | struct gr_gk20a *gr = &g->gr; | ||
1229 | size_t oob_size; | ||
1230 | void *handle = NULL; | ||
1231 | u32 sm_id; | ||
1232 | u32 max_sm; | ||
1233 | |||
1234 | msg.cmd = TEGRA_VGPU_CMD_GET_VSMS_MAPPING; | ||
1235 | msg.handle = vgpu_get_handle(g); | ||
1236 | err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)); | ||
1237 | err = err ? err : msg.ret; | ||
1238 | if (err) { | ||
1239 | nvgpu_err(g, "get vsms mapping failed err %d", err); | ||
1240 | return err; | ||
1241 | } | ||
1242 | |||
1243 | handle = vgpu_ivc_oob_get_ptr(vgpu_ivc_get_server_vmid(), | ||
1244 | TEGRA_VGPU_QUEUE_CMD, | ||
1245 | (void **)&entry, &oob_size); | ||
1246 | if (!handle) | ||
1247 | return -EINVAL; | ||
1248 | |||
1249 | max_sm = gr->gpc_count * | ||
1250 | gr->max_tpc_per_gpc_count * | ||
1251 | priv->constants.sm_per_tpc; | ||
1252 | if (p->num_sm > max_sm) { | ||
| /* release the OOB area before bailing out */ | ||
| vgpu_ivc_oob_put_ptr(handle); | ||
1253 | return -EINVAL; | ||
| } | ||
1254 | |||
1255 | if ((p->num_sm * sizeof(*entry)) > oob_size) { | ||
| vgpu_ivc_oob_put_ptr(handle); | ||
1256 | return -EINVAL; | ||
| } | ||
1257 | |||
1258 | gr->no_of_sm = p->num_sm; | ||
1259 | for (sm_id = 0; sm_id < p->num_sm; sm_id++, entry++) { | ||
1260 | sm_info = &gr->sm_to_cluster[sm_id]; | ||
1261 | sm_info->tpc_index = entry->tpc_index; | ||
1262 | sm_info->gpc_index = entry->gpc_index; | ||
1263 | sm_info->sm_index = entry->sm_index; | ||
1264 | sm_info->global_tpc_index = entry->global_tpc_index; | ||
1265 | } | ||
1266 | vgpu_ivc_oob_put_ptr(handle); | ||
1267 | |||
1268 | return 0; | ||
1269 | } | ||
1270 | |||
1271 | int vgpu_gr_init_fs_state(struct gk20a *g) | ||
1272 | { | ||
1273 | if (!g->ops.gr.init_sm_id_table) | ||
1274 | return -EINVAL; | ||
1275 | |||
1276 | return g->ops.gr.init_sm_id_table(g); | ||
1277 | } | ||