Diffstat (limited to 'drivers/gpu/nvgpu/vgpu/gr_vgpu.c')
-rw-r--r-- | drivers/gpu/nvgpu/vgpu/gr_vgpu.c | 1220
1 file changed, 1220 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
new file mode 100644
index 00000000..5dc6f68e
--- /dev/null
+++ b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
@@ -0,0 +1,1220 @@
/*
 * Virtualized GPU Graphics
 *
 * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <uapi/linux/nvgpu.h>

#include <nvgpu/kmem.h>
#include <nvgpu/bug.h>

#include "vgpu/vgpu.h"
#include "vgpu/gr_vgpu.h"
#include "gk20a/dbg_gpu_gk20a.h"

#include <nvgpu/hw/gk20a/hw_gr_gk20a.h>

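/*
 * On vGPU, static GR properties are not probed from hardware
 * registers; they arrive as a block of constants supplied by the
 * vgpu server and cached in vgpu_priv_data. Detecting the SM arch
 * therefore reduces to copying those constants into g->params.
 */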
void vgpu_gr_detect_sm_arch(struct gk20a *g)
{
	struct vgpu_priv_data *priv = vgpu_get_priv_data(g);

	gk20a_dbg_fn("");

	g->params.sm_arch_sm_version =
		priv->constants.sm_arch_sm_version;
	g->params.sm_arch_spa_version =
		priv->constants.sm_arch_spa_version;
	g->params.sm_arch_warp_count =
		priv->constants.sm_arch_warp_count;
}

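/*
 * Most operations below follow the same RPC pattern: fill in a
 * tegra_vgpu_cmd_msg (command code, vgpu handle, per-command params),
 * hand it to vgpu_comm_sendrecv(), then check both the transport
 * error code and the server-side result in msg.ret. Either one being
 * non-zero means the operation failed.
 */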
int vgpu_gr_commit_inst(struct channel_gk20a *c, u64 gpu_va)
{
	struct tegra_vgpu_cmd_msg msg;
	struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx;
	int err;

	gk20a_dbg_fn("");

	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_COMMIT_GR_CTX;
	msg.handle = vgpu_get_handle(c->g);
	p->handle = c->virt_ctx;
	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));

	return (err || msg.ret) ? -1 : 0;
}

static int vgpu_gr_commit_global_ctx_buffers(struct gk20a *g,
					struct channel_gk20a *c, bool patch)
{
	struct tegra_vgpu_cmd_msg msg;
	struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx;
	int err;

	gk20a_dbg_fn("");

	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_COMMIT_GR_GLOBAL_CTX;
	msg.handle = vgpu_get_handle(g);
	p->handle = c->virt_ctx;
	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));

	return (err || msg.ret) ? -1 : 0;
}

/* load a saved fresh copy of the golden image into the channel gr_ctx */
static int vgpu_gr_load_golden_ctx_image(struct gk20a *g,
					struct channel_gk20a *c)
{
	struct tegra_vgpu_cmd_msg msg;
	struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx;
	int err;

	gk20a_dbg_fn("");

	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_LOAD_GR_GOLDEN_CTX;
	msg.handle = vgpu_get_handle(g);
	p->handle = c->virt_ctx;
	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));

	return (err || msg.ret) ? -1 : 0;
}

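/*
 * Context image sizes (golden, zcull, hwpm) also come from the cached
 * server constants. A zero size means the server did not provide a
 * usable value, so it is treated as an error.
 */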
int vgpu_gr_init_ctx_state(struct gk20a *g)
{
	struct gr_gk20a *gr = &g->gr;
	struct vgpu_priv_data *priv = vgpu_get_priv_data(g);

	gk20a_dbg_fn("");

	g->gr.ctx_vars.golden_image_size = priv->constants.golden_ctx_size;
	g->gr.ctx_vars.zcull_ctxsw_image_size = priv->constants.zcull_ctx_size;
	g->gr.ctx_vars.pm_ctxsw_image_size = priv->constants.hwpm_ctx_size;
	if (!g->gr.ctx_vars.golden_image_size ||
		!g->gr.ctx_vars.zcull_ctxsw_image_size ||
		!g->gr.ctx_vars.pm_ctxsw_image_size)
		return -ENXIO;

	gr->ctx_vars.buffer_size = g->gr.ctx_vars.golden_image_size;
	g->gr.ctx_vars.priv_access_map_size = 512 * 1024;
	return 0;
}

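/*
 * Note that only the sizes of the global context buffers are recorded
 * here; unlike the native driver, the guest never allocates backing
 * memory for them. That presumably stays under server control.
 */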
static int vgpu_gr_alloc_global_ctx_buffers(struct gk20a *g)
{
	struct gr_gk20a *gr = &g->gr;
	int attr_buffer_size;

	u32 cb_buffer_size = gr->bundle_cb_default_size *
		gr_scc_bundle_cb_size_div_256b_byte_granularity_v();

	u32 pagepool_buffer_size = g->ops.gr.pagepool_default_size(g) *
		gr_scc_pagepool_total_pages_byte_granularity_v();

	gk20a_dbg_fn("");

	attr_buffer_size = g->ops.gr.calc_global_ctx_buffer_size(g);

	gk20a_dbg_info("cb_buffer_size : %d", cb_buffer_size);
	gr->global_ctx_buffer[CIRCULAR].mem.size = cb_buffer_size;

	gk20a_dbg_info("pagepool_buffer_size : %d", pagepool_buffer_size);
	gr->global_ctx_buffer[PAGEPOOL].mem.size = pagepool_buffer_size;

	gk20a_dbg_info("attr_buffer_size : %d", attr_buffer_size);
	gr->global_ctx_buffer[ATTRIBUTE].mem.size = attr_buffer_size;

	gk20a_dbg_info("priv access map size : %d",
		gr->ctx_vars.priv_access_map_size);
	gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem.size =
		gr->ctx_vars.priv_access_map_size;

	return 0;
}

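/*
 * Mapping is split between guest and server: the guest reserves a GPU
 * virtual address range for each global buffer in the channel's VM
 * with __nvgpu_vm_alloc_va(), then passes the chosen VAs to the
 * server, which is expected to establish the actual mappings. On any
 * failure the reserved VAs are released again.
 */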
static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
					struct channel_gk20a *c)
{
	struct tegra_vgpu_cmd_msg msg;
	struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx;
	struct vm_gk20a *ch_vm = c->vm;
	u64 *g_bfr_va = c->ch_ctx.global_ctx_buffer_va;
	u64 *g_bfr_size = c->ch_ctx.global_ctx_buffer_size;
	struct gr_gk20a *gr = &g->gr;
	u64 gpu_va;
	u32 i;
	int err;

	gk20a_dbg_fn("");

	/* FIXME: add VPR support */

	/* Circular Buffer */
	gpu_va = __nvgpu_vm_alloc_va(ch_vm,
			gr->global_ctx_buffer[CIRCULAR].mem.size,
			gmmu_page_size_kernel);

	if (!gpu_va)
		goto clean_up;
	g_bfr_va[CIRCULAR_VA] = gpu_va;
	g_bfr_size[CIRCULAR_VA] = gr->global_ctx_buffer[CIRCULAR].mem.size;

	/* Attribute Buffer */
	gpu_va = __nvgpu_vm_alloc_va(ch_vm,
			gr->global_ctx_buffer[ATTRIBUTE].mem.size,
			gmmu_page_size_kernel);

	if (!gpu_va)
		goto clean_up;
	g_bfr_va[ATTRIBUTE_VA] = gpu_va;
	g_bfr_size[ATTRIBUTE_VA] = gr->global_ctx_buffer[ATTRIBUTE].mem.size;

	/* Page Pool */
	gpu_va = __nvgpu_vm_alloc_va(ch_vm,
			gr->global_ctx_buffer[PAGEPOOL].mem.size,
			gmmu_page_size_kernel);
	if (!gpu_va)
		goto clean_up;
	g_bfr_va[PAGEPOOL_VA] = gpu_va;
	g_bfr_size[PAGEPOOL_VA] = gr->global_ctx_buffer[PAGEPOOL].mem.size;

	/* Priv register Access Map */
	gpu_va = __nvgpu_vm_alloc_va(ch_vm,
			gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem.size,
			gmmu_page_size_kernel);
	if (!gpu_va)
		goto clean_up;
	g_bfr_va[PRIV_ACCESS_MAP_VA] = gpu_va;
	g_bfr_size[PRIV_ACCESS_MAP_VA] =
		gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem.size;

	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_MAP_GR_GLOBAL_CTX;
	msg.handle = vgpu_get_handle(g);
	p->handle = c->virt_ctx;
	p->cb_va = g_bfr_va[CIRCULAR_VA];
	p->attr_va = g_bfr_va[ATTRIBUTE_VA];
	p->page_pool_va = g_bfr_va[PAGEPOOL_VA];
	p->priv_access_map_va = g_bfr_va[PRIV_ACCESS_MAP_VA];
	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
	if (err || msg.ret)
		goto clean_up;

	c->ch_ctx.global_ctx_buffer_mapped = true;
	return 0;

clean_up:
	for (i = 0; i < NR_GLOBAL_CTX_BUF_VA; i++) {
		if (g_bfr_va[i]) {
			__nvgpu_vm_free_va(ch_vm, g_bfr_va[i],
					gmmu_page_size_kernel);
			g_bfr_va[i] = 0;
		}
	}
	return -ENOMEM;
}

static void vgpu_gr_unmap_global_ctx_buffers(struct channel_gk20a *c)
{
	struct vm_gk20a *ch_vm = c->vm;
	u64 *g_bfr_va = c->ch_ctx.global_ctx_buffer_va;
	u64 *g_bfr_size = c->ch_ctx.global_ctx_buffer_size;
	u32 i;

	gk20a_dbg_fn("");

	if (c->ch_ctx.global_ctx_buffer_mapped) {
		struct tegra_vgpu_cmd_msg msg;
		struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx;
		int err;

		msg.cmd = TEGRA_VGPU_CMD_CHANNEL_UNMAP_GR_GLOBAL_CTX;
		msg.handle = vgpu_get_handle(c->g);
		p->handle = c->virt_ctx;
		err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
		WARN_ON(err || msg.ret);
	}

	for (i = 0; i < NR_GLOBAL_CTX_BUF_VA; i++) {
		if (g_bfr_va[i]) {
			__nvgpu_vm_free_va(ch_vm, g_bfr_va[i],
					gmmu_page_size_kernel);
			g_bfr_va[i] = 0;
			g_bfr_size[i] = 0;
		}
	}
	c->ch_ctx.global_ctx_buffer_mapped = false;
}

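/*
 * GR context allocation follows the same split: the guest reserves a
 * VA range sized from the golden image, and the server allocates the
 * context there. The opaque handle returned in p->gr_ctx_handle
 * identifies the context in later bind/free calls.
 */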
int vgpu_gr_alloc_gr_ctx(struct gk20a *g,
			struct gr_ctx_desc **__gr_ctx,
			struct vm_gk20a *vm,
			u32 class,
			u32 flags)
{
	struct tegra_vgpu_cmd_msg msg = {0};
	struct tegra_vgpu_gr_ctx_params *p = &msg.params.gr_ctx;
	struct gr_gk20a *gr = &g->gr;
	struct gr_ctx_desc *gr_ctx;
	int err;

	gk20a_dbg_fn("");

	if (gr->ctx_vars.buffer_size == 0)
		return 0;

	/* alloc channel gr ctx buffer */
	gr->ctx_vars.buffer_size = gr->ctx_vars.golden_image_size;
	gr->ctx_vars.buffer_total_size = gr->ctx_vars.golden_image_size;

	gr_ctx = nvgpu_kzalloc(g, sizeof(*gr_ctx));
	if (!gr_ctx)
		return -ENOMEM;

	gr_ctx->mem.size = gr->ctx_vars.buffer_total_size;
	gr_ctx->mem.gpu_va = __nvgpu_vm_alloc_va(vm,
						gr_ctx->mem.size,
						gmmu_page_size_kernel);

	if (!gr_ctx->mem.gpu_va) {
		nvgpu_kfree(g, gr_ctx);
		return -ENOMEM;
	}

	msg.cmd = TEGRA_VGPU_CMD_GR_CTX_ALLOC;
	msg.handle = vgpu_get_handle(g);
	p->as_handle = vm->handle;
	p->gr_ctx_va = gr_ctx->mem.gpu_va;
	p->class_num = class;
	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
	err = err ? err : msg.ret;

	if (unlikely(err)) {
		nvgpu_err(g, "fail to alloc gr_ctx");
		__nvgpu_vm_free_va(vm, gr_ctx->mem.gpu_va,
				gmmu_page_size_kernel);
		nvgpu_kfree(g, gr_ctx);
	} else {
		gr_ctx->virt_ctx = p->gr_ctx_handle;
		*__gr_ctx = gr_ctx;
	}

	return err;
}

void vgpu_gr_free_gr_ctx(struct gk20a *g, struct vm_gk20a *vm,
			struct gr_ctx_desc *gr_ctx)
{
	gk20a_dbg_fn("");

	if (gr_ctx && gr_ctx->mem.gpu_va) {
		struct tegra_vgpu_cmd_msg msg;
		struct tegra_vgpu_gr_ctx_params *p = &msg.params.gr_ctx;
		int err;

		msg.cmd = TEGRA_VGPU_CMD_GR_CTX_FREE;
		msg.handle = vgpu_get_handle(g);
		p->gr_ctx_handle = gr_ctx->virt_ctx;
		err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
		WARN_ON(err || msg.ret);

		__nvgpu_vm_free_va(vm, gr_ctx->mem.gpu_va,
				gmmu_page_size_kernel);
		nvgpu_kfree(g, gr_ctx);
	}
}

static void vgpu_gr_free_channel_gr_ctx(struct channel_gk20a *c)
{
	gk20a_dbg_fn("");

	c->g->ops.gr.free_gr_ctx(c->g, c->vm, c->ch_ctx.gr_ctx);
	c->ch_ctx.gr_ctx = NULL;
}

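/*
 * The patch context is a small, fixed-size (128-word) buffer; again
 * the guest only reserves the VA and the server provides the backing
 * store.
 */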
static int vgpu_gr_alloc_channel_patch_ctx(struct gk20a *g,
					struct channel_gk20a *c)
{
	struct patch_desc *patch_ctx = &c->ch_ctx.patch_ctx;
	struct vm_gk20a *ch_vm = c->vm;
	struct tegra_vgpu_cmd_msg msg;
	struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx;
	int err;

	gk20a_dbg_fn("");

	patch_ctx->mem.size = 128 * sizeof(u32);
	patch_ctx->mem.gpu_va = __nvgpu_vm_alloc_va(ch_vm,
						patch_ctx->mem.size,
						gmmu_page_size_kernel);
	if (!patch_ctx->mem.gpu_va)
		return -ENOMEM;

	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_ALLOC_GR_PATCH_CTX;
	msg.handle = vgpu_get_handle(g);
	p->handle = c->virt_ctx;
	p->patch_ctx_va = patch_ctx->mem.gpu_va;
	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
	if (err || msg.ret) {
		__nvgpu_vm_free_va(ch_vm, patch_ctx->mem.gpu_va,
				gmmu_page_size_kernel);
		err = -ENOMEM;
	}

	return err;
}

static void vgpu_gr_free_channel_patch_ctx(struct channel_gk20a *c)
{
	struct patch_desc *patch_ctx = &c->ch_ctx.patch_ctx;
	struct vm_gk20a *ch_vm = c->vm;

	gk20a_dbg_fn("");

	if (patch_ctx->mem.gpu_va) {
		struct tegra_vgpu_cmd_msg msg;
		struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx;
		int err;

		msg.cmd = TEGRA_VGPU_CMD_CHANNEL_FREE_GR_PATCH_CTX;
		msg.handle = vgpu_get_handle(c->g);
		p->handle = c->virt_ctx;
		err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
		WARN_ON(err || msg.ret);

		__nvgpu_vm_free_va(ch_vm, patch_ctx->mem.gpu_va,
				gmmu_page_size_kernel);
		patch_ctx->mem.gpu_va = 0;
	}
}

static void vgpu_gr_free_channel_pm_ctx(struct channel_gk20a *c)
{
	struct tegra_vgpu_cmd_msg msg;
	struct tegra_vgpu_channel_free_hwpm_ctx *p = &msg.params.free_hwpm_ctx;
	struct channel_ctx_gk20a *ch_ctx = &c->ch_ctx;
	struct pm_ctx_desc *pm_ctx = &ch_ctx->pm_ctx;
	int err;

	gk20a_dbg_fn("");

	/* check if hwpm was ever initialized. If not, nothing to do */
	if (pm_ctx->mem.gpu_va == 0)
		return;

	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_FREE_HWPM_CTX;
	msg.handle = vgpu_get_handle(c->g);
	p->handle = c->virt_ctx;
	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
	WARN_ON(err || msg.ret);

	__nvgpu_vm_free_va(c->vm, pm_ctx->mem.gpu_va,
			gmmu_page_size_kernel);
	pm_ctx->mem.gpu_va = 0;
}

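/*
 * Full teardown of a channel's GR state. For TSG channels the gr_ctx
 * is shared and owned by the TSG, so it is deliberately left alone
 * here.
 */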
void vgpu_gr_free_channel_ctx(struct channel_gk20a *c, bool is_tsg)
{
	gk20a_dbg_fn("");

	if (c->g->ops.fifo.free_channel_ctx_header)
		c->g->ops.fifo.free_channel_ctx_header(c);
	vgpu_gr_unmap_global_ctx_buffers(c);
	vgpu_gr_free_channel_patch_ctx(c);
	vgpu_gr_free_channel_pm_ctx(c);
	if (!is_tsg)
		vgpu_gr_free_channel_gr_ctx(c);

	/* zcull_ctx, pm_ctx */

	memset(&c->ch_ctx, 0, sizeof(struct channel_ctx_gk20a));

	c->first_init = false;
}

static int vgpu_gr_ch_bind_gr_ctx(struct channel_gk20a *c)
{
	struct gr_ctx_desc *gr_ctx = c->ch_ctx.gr_ctx;
	struct tegra_vgpu_cmd_msg msg = {0};
	struct tegra_vgpu_channel_bind_gr_ctx_params *p =
				&msg.params.ch_bind_gr_ctx;
	int err;

	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_BIND_GR_CTX;
	msg.handle = vgpu_get_handle(c->g);
	p->ch_handle = c->virt_ctx;
	p->gr_ctx_handle = gr_ctx->virt_ctx;
	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
	err = err ? err : msg.ret;
	WARN_ON(err);

	return err;
}

static int vgpu_gr_tsg_bind_gr_ctx(struct tsg_gk20a *tsg)
{
	struct gr_ctx_desc *gr_ctx = tsg->tsg_gr_ctx;
	struct tegra_vgpu_cmd_msg msg = {0};
	struct tegra_vgpu_tsg_bind_gr_ctx_params *p =
				&msg.params.tsg_bind_gr_ctx;
	int err;

	msg.cmd = TEGRA_VGPU_CMD_TSG_BIND_GR_CTX;
	msg.handle = vgpu_get_handle(tsg->g);
	p->tsg_id = tsg->tsgid;
	p->gr_ctx_handle = gr_ctx->virt_ctx;
	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
	err = err ? err : msg.ret;
	WARN_ON(err);

	return err;
}

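/*
 * Object context allocation ties the pieces above together: allocate
 * (or reuse) a gr_ctx and bind it to the channel or TSG, commit it to
 * the instance block, allocate the patch buffer, map and commit the
 * global context buffers, and finally load the golden image on the
 * channel's first object allocation.
 */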
int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags)
{
	struct gk20a *g = c->g;
	struct fifo_gk20a *f = &g->fifo;
	struct channel_ctx_gk20a *ch_ctx = &c->ch_ctx;
	struct tsg_gk20a *tsg = NULL;
	int err = 0;

	gk20a_dbg_fn("");

	/* an address space needs to have been bound at this point. */
	if (!gk20a_channel_as_bound(c)) {
		nvgpu_err(g, "not bound to address space at time"
			" of grctx allocation");
		return -EINVAL;
	}

	if (!g->ops.gr.is_valid_class(g, class_num)) {
		nvgpu_err(g, "invalid obj class 0x%x", class_num);
		err = -EINVAL;
		goto out;
	}
	c->obj_class = class_num;

	if (gk20a_is_channel_marked_as_tsg(c))
		tsg = &f->tsg[c->tsgid];

	if (!tsg) {
		/* allocate gr ctx buffer */
		if (!ch_ctx->gr_ctx) {
			err = g->ops.gr.alloc_gr_ctx(g, &c->ch_ctx.gr_ctx,
						c->vm,
						class_num,
						flags);
			if (!err)
				err = vgpu_gr_ch_bind_gr_ctx(c);
			if (err) {
				nvgpu_err(g, "fail to allocate gr ctx buffer");
				goto out;
			}
		} else {
			/* TBD: needs to be more subtle about which is
			 * being allocated, as some are allowed to be
			 * allocated along the same channel */
			nvgpu_err(g,
				"too many classes alloc'd on same channel");
			err = -EINVAL;
			goto out;
		}
	} else {
		if (!tsg->tsg_gr_ctx) {
			tsg->vm = c->vm;
			nvgpu_vm_get(tsg->vm);
			err = g->ops.gr.alloc_gr_ctx(g, &tsg->tsg_gr_ctx,
						c->vm,
						class_num,
						flags);
			if (!err)
				err = vgpu_gr_tsg_bind_gr_ctx(tsg);
			if (err) {
				nvgpu_err(g,
					"fail to allocate TSG gr ctx buffer, err=%d", err);
				nvgpu_vm_put(tsg->vm);
				tsg->vm = NULL;
				goto out;
			}
		}

		ch_ctx->gr_ctx = tsg->tsg_gr_ctx;
		err = vgpu_gr_ch_bind_gr_ctx(c);
		if (err) {
			nvgpu_err(g, "fail to bind gr ctx buffer");
			goto out;
		}
	}

	/* commit gr ctx buffer */
	err = g->ops.gr.commit_inst(c, ch_ctx->gr_ctx->mem.gpu_va);
	if (err) {
		nvgpu_err(g, "fail to commit gr ctx buffer");
		goto out;
	}

	/* allocate patch buffer */
	if (ch_ctx->patch_ctx.mem.priv.pages == NULL) {
		err = vgpu_gr_alloc_channel_patch_ctx(g, c);
		if (err) {
			nvgpu_err(g, "fail to allocate patch buffer");
			goto out;
		}
	}

	/* map global buffer to channel gpu_va and commit */
	if (!ch_ctx->global_ctx_buffer_mapped) {
		err = vgpu_gr_map_global_ctx_buffers(g, c);
		if (err) {
			nvgpu_err(g, "fail to map global ctx buffer");
			goto out;
		}
		gr_gk20a_elpg_protected_call(g,
			vgpu_gr_commit_global_ctx_buffers(g, c, true));
	}

	/* load golden image */
	if (!c->first_init) {
		err = gr_gk20a_elpg_protected_call(g,
			vgpu_gr_load_golden_ctx_image(g, c));
		if (err) {
			nvgpu_err(g, "fail to load golden ctx image");
			goto out;
		}
		c->first_init = true;
	}

	gk20a_dbg_fn("done");
	return 0;
out:
	/* 1. gr_ctx, patch_ctx and the global ctx buffer mapping
	   can be reused, so there is no need to release them.
	   2. golden image load is a one-time thing, so if it
	   passes there is no need to undo it. */
	nvgpu_err(g, "fail");
	return err;
}

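/*
 * GPC/TPC topology is also taken from the server-provided constants.
 * The per-GPC TPC counts and masks are copied into the gr structure
 * so the common gr code can consume them exactly as on native
 * hardware.
 */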
static int vgpu_gr_init_gr_config(struct gk20a *g, struct gr_gk20a *gr)
{
	struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
	u32 gpc_index;
	int err = -ENOMEM;

	gk20a_dbg_fn("");

	gr->max_gpc_count = priv->constants.max_gpc_count;
	gr->gpc_count = priv->constants.gpc_count;
	gr->max_tpc_per_gpc_count = priv->constants.max_tpc_per_gpc_count;

	gr->max_tpc_count = gr->max_gpc_count * gr->max_tpc_per_gpc_count;

	gr->gpc_tpc_count = nvgpu_kzalloc(g, gr->gpc_count * sizeof(u32));
	if (!gr->gpc_tpc_count)
		goto cleanup;

	gr->gpc_tpc_mask = nvgpu_kzalloc(g, gr->gpc_count * sizeof(u32));
	if (!gr->gpc_tpc_mask)
		goto cleanup;

	gr->sm_to_cluster = nvgpu_kzalloc(g, gr->gpc_count *
					gr->max_tpc_per_gpc_count *
					sizeof(struct sm_info));
	if (!gr->sm_to_cluster)
		goto cleanup;

	gr->tpc_count = 0;
	for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) {
		gr->gpc_tpc_count[gpc_index] =
			priv->constants.gpc_tpc_count[gpc_index];

		gr->tpc_count += gr->gpc_tpc_count[gpc_index];

		if (g->ops.gr.get_gpc_tpc_mask)
			gr->gpc_tpc_mask[gpc_index] =
				g->ops.gr.get_gpc_tpc_mask(g, gpc_index);
	}

	g->ops.gr.bundle_cb_defaults(g);
	g->ops.gr.cb_size_default(g);
	g->ops.gr.calc_global_ctx_buffer_size(g);
	err = g->ops.gr.init_fs_state(g);
	if (err)
		goto cleanup;
	return 0;
cleanup:
	nvgpu_err(g, "out of memory");

	nvgpu_kfree(g, gr->gpc_tpc_count);
	gr->gpc_tpc_count = NULL;

	nvgpu_kfree(g, gr->gpc_tpc_mask);
	gr->gpc_tpc_mask = NULL;

	return err;
}

int vgpu_gr_bind_ctxsw_zcull(struct gk20a *g, struct gr_gk20a *gr,
			struct channel_gk20a *c, u64 zcull_va,
			u32 mode)
{
	struct tegra_vgpu_cmd_msg msg;
	struct tegra_vgpu_zcull_bind_params *p = &msg.params.zcull_bind;
	int err;

	gk20a_dbg_fn("");

	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_BIND_ZCULL;
	msg.handle = vgpu_get_handle(g);
	p->handle = c->virt_ctx;
	p->zcull_va = zcull_va;
	p->mode = mode;
	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));

	return (err || msg.ret) ? -ENOMEM : 0;
}

int vgpu_gr_get_zcull_info(struct gk20a *g, struct gr_gk20a *gr,
			struct gr_zcull_info *zcull_params)
{
	struct tegra_vgpu_cmd_msg msg;
	struct tegra_vgpu_zcull_info_params *p = &msg.params.zcull_info;
	int err;

	gk20a_dbg_fn("");

	msg.cmd = TEGRA_VGPU_CMD_GET_ZCULL_INFO;
	msg.handle = vgpu_get_handle(g);
	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
	if (err || msg.ret)
		return -ENOMEM;

	zcull_params->width_align_pixels = p->width_align_pixels;
	zcull_params->height_align_pixels = p->height_align_pixels;
	zcull_params->pixel_squares_by_aliquots = p->pixel_squares_by_aliquots;
	zcull_params->aliquot_total = p->aliquot_total;
	zcull_params->region_byte_multiplier = p->region_byte_multiplier;
	zcull_params->region_header_size = p->region_header_size;
	zcull_params->subregion_header_size = p->subregion_header_size;
	zcull_params->subregion_width_align_pixels =
		p->subregion_width_align_pixels;
	zcull_params->subregion_height_align_pixels =
		p->subregion_height_align_pixels;
	zcull_params->subregion_count = p->subregion_count;

	return 0;
}

u32 vgpu_gr_get_gpc_tpc_mask(struct gk20a *g, u32 gpc_index)
{
	struct vgpu_priv_data *priv = vgpu_get_priv_data(g);

	return priv->constants.gpc_tpc_mask[gpc_index];
}

u32 vgpu_gr_get_max_fbps_count(struct gk20a *g)
{
	struct vgpu_priv_data *priv = vgpu_get_priv_data(g);

	gk20a_dbg_fn("");

	return priv->constants.num_fbps;
}

u32 vgpu_gr_get_fbp_en_mask(struct gk20a *g)
{
	struct vgpu_priv_data *priv = vgpu_get_priv_data(g);

	gk20a_dbg_fn("");

	return priv->constants.fbp_en_mask;
}

u32 vgpu_gr_get_max_ltc_per_fbp(struct gk20a *g)
{
	struct vgpu_priv_data *priv = vgpu_get_priv_data(g);

	gk20a_dbg_fn("");

	return priv->constants.ltc_per_fbp;
}

u32 vgpu_gr_get_max_lts_per_ltc(struct gk20a *g)
{
	struct vgpu_priv_data *priv = vgpu_get_priv_data(g);

	gk20a_dbg_fn("");

	return priv->constants.max_lts_per_ltc;
}

u32 *vgpu_gr_rop_l2_en_mask(struct gk20a *g)
{
	/* no one uses it yet */
	return NULL;
}

int vgpu_gr_add_zbc(struct gk20a *g, struct gr_gk20a *gr,
		struct zbc_entry *zbc_val)
{
	struct tegra_vgpu_cmd_msg msg = {0};
	struct tegra_vgpu_zbc_set_table_params *p = &msg.params.zbc_set_table;
	int err;

	gk20a_dbg_fn("");

	msg.cmd = TEGRA_VGPU_CMD_ZBC_SET_TABLE;
	msg.handle = vgpu_get_handle(g);

	p->type = zbc_val->type;
	p->format = zbc_val->format;
	switch (p->type) {
	case GK20A_ZBC_TYPE_COLOR:
		memcpy(p->color_ds, zbc_val->color_ds, sizeof(p->color_ds));
		memcpy(p->color_l2, zbc_val->color_l2, sizeof(p->color_l2));
		break;
	case GK20A_ZBC_TYPE_DEPTH:
		p->depth = zbc_val->depth;
		break;
	default:
		return -EINVAL;
	}

	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));

	return (err || msg.ret) ? -ENOMEM : 0;
}

int vgpu_gr_query_zbc(struct gk20a *g, struct gr_gk20a *gr,
			struct zbc_query_params *query_params)
{
	struct tegra_vgpu_cmd_msg msg = {0};
	struct tegra_vgpu_zbc_query_table_params *p =
					&msg.params.zbc_query_table;
	int err;

	gk20a_dbg_fn("");

	msg.cmd = TEGRA_VGPU_CMD_ZBC_QUERY_TABLE;
	msg.handle = vgpu_get_handle(g);

	p->type = query_params->type;
	p->index_size = query_params->index_size;

	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
	if (err || msg.ret)
		return -ENOMEM;

	switch (query_params->type) {
	case GK20A_ZBC_TYPE_COLOR:
		memcpy(query_params->color_ds, p->color_ds,
				sizeof(query_params->color_ds));
		memcpy(query_params->color_l2, p->color_l2,
				sizeof(query_params->color_l2));
		break;
	case GK20A_ZBC_TYPE_DEPTH:
		query_params->depth = p->depth;
		break;
	case GK20A_ZBC_TYPE_INVALID:
		query_params->index_size = p->index_size;
		break;
	default:
		return -EINVAL;
	}
	query_params->ref_cnt = p->ref_cnt;
	query_params->format = p->format;

	return 0;
}

static void vgpu_remove_gr_support(struct gr_gk20a *gr)
{
	gk20a_dbg_fn("");

	gk20a_comptag_allocator_destroy(gr->g, &gr->comp_tags);

	nvgpu_kfree(gr->g, gr->sm_error_states);
	gr->sm_error_states = NULL;

	nvgpu_kfree(gr->g, gr->gpc_tpc_mask);
	gr->gpc_tpc_mask = NULL;

	nvgpu_kfree(gr->g, gr->sm_to_cluster);
	gr->sm_to_cluster = NULL;

	nvgpu_kfree(gr->g, gr->gpc_tpc_count);
	gr->gpc_tpc_count = NULL;
}

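/*
 * One-time software setup: pull topology and context sizes from the
 * server, initialize comptags, size the global context buffers, and
 * allocate the per-SM error state array. Calling it again after
 * sw_ready is set is a no-op.
 */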
static int vgpu_gr_init_gr_setup_sw(struct gk20a *g)
{
	struct gr_gk20a *gr = &g->gr;
	int err;

	gk20a_dbg_fn("");

	if (gr->sw_ready) {
		gk20a_dbg_fn("skip init");
		return 0;
	}

	gr->g = g;

#if defined(CONFIG_GK20A_CYCLE_STATS)
	nvgpu_mutex_init(&g->gr.cs_lock);
#endif

	err = vgpu_gr_init_gr_config(g, gr);
	if (err)
		goto clean_up;

	err = g->ops.gr.init_ctx_state(g);
	if (err)
		goto clean_up;

	err = g->ops.ltc.init_comptags(g, gr);
	if (err)
		goto clean_up;

	err = vgpu_gr_alloc_global_ctx_buffers(g);
	if (err)
		goto clean_up;

	nvgpu_mutex_init(&gr->ctx_mutex);

	gr->sm_error_states = nvgpu_kzalloc(g,
			sizeof(struct nvgpu_gr_sm_error_state) *
			gr->no_of_sm);
	if (!gr->sm_error_states) {
		err = -ENOMEM;
		goto clean_up;
	}

	gr->remove_support = vgpu_remove_gr_support;
	gr->sw_ready = true;

	gk20a_dbg_fn("done");
	return 0;

clean_up:
	nvgpu_err(g, "fail");
	vgpu_remove_gr_support(gr);
	return err;
}

int vgpu_init_gr_support(struct gk20a *g)
{
	gk20a_dbg_fn("");

	return vgpu_gr_init_gr_setup_sw(g);
}

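/*
 * GR interrupts are handled by the server and forwarded to the guest
 * as tegra_vgpu_gr_intr_info events; this handler only translates the
 * event type into the matching wakeup or error notifier on the
 * affected channel.
 */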
int vgpu_gr_isr(struct gk20a *g, struct tegra_vgpu_gr_intr_info *info)
{
	struct fifo_gk20a *f = &g->fifo;
	struct channel_gk20a *ch = gk20a_channel_get(&f->channel[info->chid]);

	gk20a_dbg_fn("");
	if (!ch)
		return 0;

	if (info->type != TEGRA_VGPU_GR_INTR_NOTIFY &&
		info->type != TEGRA_VGPU_GR_INTR_SEMAPHORE)
		nvgpu_err(g, "gr intr (%d) on ch %u", info->type, info->chid);

	switch (info->type) {
	case TEGRA_VGPU_GR_INTR_NOTIFY:
		nvgpu_cond_broadcast_interruptible(&ch->notifier_wq);
		break;
	case TEGRA_VGPU_GR_INTR_SEMAPHORE:
		nvgpu_cond_broadcast_interruptible(&ch->semaphore_wq);
		break;
	case TEGRA_VGPU_GR_INTR_SEMAPHORE_TIMEOUT:
		gk20a_set_error_notifier(ch,
					NVGPU_CHANNEL_GR_SEMAPHORE_TIMEOUT);
		break;
	case TEGRA_VGPU_GR_INTR_ILLEGAL_NOTIFY:
		gk20a_set_error_notifier(ch,
					NVGPU_CHANNEL_GR_ILLEGAL_NOTIFY);
		/* fall through */
	case TEGRA_VGPU_GR_INTR_ILLEGAL_METHOD:
		break;
	case TEGRA_VGPU_GR_INTR_ILLEGAL_CLASS:
		gk20a_set_error_notifier(ch,
					NVGPU_CHANNEL_GR_ERROR_SW_NOTIFY);
		break;
	case TEGRA_VGPU_GR_INTR_FECS_ERROR:
		break;
	case TEGRA_VGPU_GR_INTR_CLASS_ERROR:
		gk20a_set_error_notifier(ch,
					NVGPU_CHANNEL_GR_ERROR_SW_NOTIFY);
		break;
	case TEGRA_VGPU_GR_INTR_FIRMWARE_METHOD:
		gk20a_set_error_notifier(ch,
					NVGPU_CHANNEL_GR_ERROR_SW_NOTIFY);
		break;
	case TEGRA_VGPU_GR_INTR_EXCEPTION:
		gk20a_set_error_notifier(ch,
					NVGPU_CHANNEL_GR_ERROR_SW_NOTIFY);
		break;
	case TEGRA_VGPU_GR_INTR_SM_EXCEPTION:
		gk20a_dbg_gpu_post_events(ch);
		break;
	default:
		WARN_ON(1);
		break;
	}

	gk20a_channel_put(ch);
	return 0;
}

int vgpu_gr_nonstall_isr(struct gk20a *g,
			struct tegra_vgpu_gr_nonstall_intr_info *info)
{
	gk20a_dbg_fn("");

	switch (info->type) {
	case TEGRA_VGPU_GR_NONSTALL_INTR_SEMAPHORE:
		gk20a_channel_semaphore_wakeup(g, true);
		break;
	default:
		WARN_ON(1);
		break;
	}

	return 0;
}

int vgpu_gr_set_sm_debug_mode(struct gk20a *g,
	struct channel_gk20a *ch, u64 sms, bool enable)
{
	struct tegra_vgpu_cmd_msg msg;
	struct tegra_vgpu_sm_debug_mode *p = &msg.params.sm_debug_mode;
	int err;

	gk20a_dbg_fn("");

	msg.cmd = TEGRA_VGPU_CMD_SET_SM_DEBUG_MODE;
	msg.handle = vgpu_get_handle(g);
	p->handle = ch->virt_ctx;
	p->sms = sms;
	p->enable = (u32)enable;
	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
	WARN_ON(err || msg.ret);

	return err ? err : msg.ret;
}

int vgpu_gr_update_smpc_ctxsw_mode(struct gk20a *g,
	struct channel_gk20a *ch, bool enable)
{
	struct tegra_vgpu_cmd_msg msg;
	struct tegra_vgpu_channel_set_ctxsw_mode *p = &msg.params.set_ctxsw_mode;
	int err;

	gk20a_dbg_fn("");

	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_SET_SMPC_CTXSW_MODE;
	msg.handle = vgpu_get_handle(g);
	p->handle = ch->virt_ctx;

	if (enable)
		p->mode = TEGRA_VGPU_CTXSW_MODE_CTXSW;
	else
		p->mode = TEGRA_VGPU_CTXSW_MODE_NO_CTXSW;

	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
	WARN_ON(err || msg.ret);

	return err ? err : msg.ret;
}

int vgpu_gr_update_hwpm_ctxsw_mode(struct gk20a *g,
	struct channel_gk20a *ch, bool enable)
{
	struct channel_ctx_gk20a *ch_ctx = &ch->ch_ctx;
	struct pm_ctx_desc *pm_ctx = &ch_ctx->pm_ctx;
	struct tegra_vgpu_cmd_msg msg;
	struct tegra_vgpu_channel_set_ctxsw_mode *p = &msg.params.set_ctxsw_mode;
	int err;

	gk20a_dbg_fn("");

	if (enable) {
		p->mode = TEGRA_VGPU_CTXSW_MODE_CTXSW;

		/* Allocate buffer if necessary */
		if (pm_ctx->mem.gpu_va == 0) {
			pm_ctx->mem.gpu_va = __nvgpu_vm_alloc_va(ch->vm,
					g->gr.ctx_vars.pm_ctxsw_image_size,
					gmmu_page_size_kernel);

			if (!pm_ctx->mem.gpu_va)
				return -ENOMEM;
			pm_ctx->mem.size = g->gr.ctx_vars.pm_ctxsw_image_size;
		}
	} else
		p->mode = TEGRA_VGPU_CTXSW_MODE_NO_CTXSW;

	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_SET_HWPM_CTXSW_MODE;
	msg.handle = vgpu_get_handle(g);
	p->handle = ch->virt_ctx;
	p->gpu_va = pm_ctx->mem.gpu_va;

	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
	WARN_ON(err || msg.ret);

	return err ? err : msg.ret;
}

int vgpu_gr_clear_sm_error_state(struct gk20a *g,
		struct channel_gk20a *ch, u32 sm_id)
{
	struct gr_gk20a *gr = &g->gr;
	struct tegra_vgpu_cmd_msg msg;
	struct tegra_vgpu_clear_sm_error_state *p =
			&msg.params.clear_sm_error_state;
	int err;

	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
	msg.cmd = TEGRA_VGPU_CMD_CLEAR_SM_ERROR_STATE;
	msg.handle = vgpu_get_handle(g);
	p->handle = ch->virt_ctx;
	p->sm_id = sm_id;

	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
	WARN_ON(err || msg.ret);

	memset(&gr->sm_error_states[sm_id], 0, sizeof(*gr->sm_error_states));
	nvgpu_mutex_release(&g->dbg_sessions_lock);

	return err ? err : msg.ret;
}

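/*
 * Suspending/resuming debugger contexts passes the potentially long
 * list of channel IDs through the out-of-band shared memory area
 * rather than the fixed-size command message. The server reports
 * which channel, if any, was context-resident via p->resident_chid.
 */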
static int vgpu_gr_suspend_resume_contexts(struct gk20a *g,
		struct dbg_session_gk20a *dbg_s,
		int *ctx_resident_ch_fd, u32 cmd)
{
	struct dbg_session_channel_data *ch_data;
	struct tegra_vgpu_cmd_msg msg;
	struct tegra_vgpu_suspend_resume_contexts *p;
	size_t n;
	int channel_fd = -1;
	int err = 0;
	void *handle = NULL;
	u16 *oob;
	size_t oob_size;

	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
	nvgpu_mutex_acquire(&dbg_s->ch_list_lock);

	handle = tegra_gr_comm_oob_get_ptr(TEGRA_GR_COMM_CTX_CLIENT,
			tegra_gr_comm_get_server_vmid(), TEGRA_VGPU_QUEUE_CMD,
			(void **)&oob, &oob_size);
	if (!handle) {
		err = -EINVAL;
		goto done;
	}

	n = 0;
	list_for_each_entry(ch_data, &dbg_s->ch_list, ch_entry)
		n++;

	if (oob_size < n * sizeof(u16)) {
		err = -ENOMEM;
		goto done;
	}

	msg.cmd = cmd;
	msg.handle = vgpu_get_handle(g);
	p = &msg.params.suspend_contexts;
	p->num_channels = n;
	n = 0;
	list_for_each_entry(ch_data, &dbg_s->ch_list, ch_entry)
		oob[n++] = (u16)ch_data->chid;

	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
	if (err || msg.ret) {
		err = -ENOMEM;
		goto done;
	}

	if (p->resident_chid != (u16)~0) {
		list_for_each_entry(ch_data, &dbg_s->ch_list, ch_entry) {
			if (ch_data->chid == p->resident_chid) {
				channel_fd = ch_data->channel_fd;
				break;
			}
		}
	}

done:
	if (handle)
		tegra_gr_comm_oob_put_ptr(handle);
	nvgpu_mutex_release(&dbg_s->ch_list_lock);
	nvgpu_mutex_release(&g->dbg_sessions_lock);
	*ctx_resident_ch_fd = channel_fd;
	return err;
}

int vgpu_gr_suspend_contexts(struct gk20a *g,
		struct dbg_session_gk20a *dbg_s,
		int *ctx_resident_ch_fd)
{
	return vgpu_gr_suspend_resume_contexts(g, dbg_s,
			ctx_resident_ch_fd, TEGRA_VGPU_CMD_SUSPEND_CONTEXTS);
}

int vgpu_gr_resume_contexts(struct gk20a *g,
		struct dbg_session_gk20a *dbg_s,
		int *ctx_resident_ch_fd)
{
	return vgpu_gr_suspend_resume_contexts(g, dbg_s,
			ctx_resident_ch_fd, TEGRA_VGPU_CMD_RESUME_CONTEXTS);
}

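/*
 * SM ESR events are likewise forwarded from the server; the guest
 * only records the reported error state so debug sessions can query
 * it later.
 */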
void vgpu_gr_handle_sm_esr_event(struct gk20a *g,
			struct tegra_vgpu_sm_esr_info *info)
{
	struct nvgpu_gr_sm_error_state *sm_error_states;

	if (info->sm_id >= g->gr.no_of_sm) {
		nvgpu_err(g, "invalid sm_id %d / %d",
			info->sm_id, g->gr.no_of_sm);
		return;
	}

	nvgpu_mutex_acquire(&g->dbg_sessions_lock);

	sm_error_states = &g->gr.sm_error_states[info->sm_id];

	sm_error_states->hww_global_esr = info->hww_global_esr;
	sm_error_states->hww_warp_esr = info->hww_warp_esr;
	sm_error_states->hww_warp_esr_pc = info->hww_warp_esr_pc;
	sm_error_states->hww_global_esr_report_mask =
		info->hww_global_esr_report_mask;
	sm_error_states->hww_warp_esr_report_mask =
		info->hww_warp_esr_report_mask;

	nvgpu_mutex_release(&g->dbg_sessions_lock);
}