Diffstat (limited to 'drivers/gpu/nvgpu/common/linux/vgpu/gp10b/vgpu_gr_gp10b.c')
-rw-r--r--  drivers/gpu/nvgpu/common/linux/vgpu/gp10b/vgpu_gr_gp10b.c  332
1 file changed, 332 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/linux/vgpu/gp10b/vgpu_gr_gp10b.c b/drivers/gpu/nvgpu/common/linux/vgpu/gp10b/vgpu_gr_gp10b.c
new file mode 100644
index 00000000..efc9c595
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/linux/vgpu/gp10b/vgpu_gr_gp10b.c
@@ -0,0 +1,332 @@
/*
 * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <nvgpu/kmem.h>
#include <nvgpu/dma.h>
#include <nvgpu/bug.h>

#include "common/linux/vgpu/vgpu.h"
#include "common/linux/vgpu/gm20b/vgpu_gr_gm20b.h"

#include "vgpu_gr_gp10b.h"

#include <nvgpu/hw/gp10b/hw_gr_gp10b.h>

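/*
 * Free a GR context that was allocated through the vgpu path: ask the RM
 * server to drop its copy, release the context VA, and unmap/free the
 * ctxsw buffers that set_ctxsw_preemption_mode may have attached.
 */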
void vgpu_gr_gp10b_free_gr_ctx(struct gk20a *g, struct vm_gk20a *vm,
			       struct gr_ctx_desc *gr_ctx)
{
	struct tegra_vgpu_cmd_msg msg = {0};
	struct tegra_vgpu_gr_ctx_params *p = &msg.params.gr_ctx;
	int err;

	gk20a_dbg_fn("");

	if (!gr_ctx || !gr_ctx->mem.gpu_va)
		return;

	msg.cmd = TEGRA_VGPU_CMD_GR_CTX_FREE;
	msg.handle = vgpu_get_handle(g);
	p->gr_ctx_handle = gr_ctx->virt_ctx;
	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
	WARN_ON(err || msg.ret);

	__nvgpu_vm_free_va(vm, gr_ctx->mem.gpu_va, gmmu_page_size_kernel);

	nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.pagepool_ctxsw_buffer);
	nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.betacb_ctxsw_buffer);
	nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.spill_ctxsw_buffer);
	nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.preempt_ctxsw_buffer);

	nvgpu_kfree(g, gr_ctx);
}

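/*
 * Allocate a GR context via the common vgpu path, derive initial
 * preemption modes from the allocation flags (or from the server's
 * force_preempt_mode constant when no mode was requested), and apply
 * them through set_ctxsw_preemption_mode. On failure the half-built
 * context is freed again.
 */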
int vgpu_gr_gp10b_alloc_gr_ctx(struct gk20a *g,
			       struct gr_ctx_desc **__gr_ctx,
			       struct vm_gk20a *vm,
			       u32 class,
			       u32 flags)
{
	struct gr_ctx_desc *gr_ctx;
	u32 graphics_preempt_mode = 0;
	u32 compute_preempt_mode = 0;
	struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
	int err;

	gk20a_dbg_fn("");

	err = vgpu_gr_alloc_gr_ctx(g, __gr_ctx, vm, class, flags);
	if (err)
		return err;

	gr_ctx = *__gr_ctx;

	if (flags & NVGPU_OBJ_CTX_FLAGS_SUPPORT_GFXP)
		graphics_preempt_mode = NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP;
	if (flags & NVGPU_OBJ_CTX_FLAGS_SUPPORT_CILP)
		compute_preempt_mode = NVGPU_PREEMPTION_MODE_COMPUTE_CILP;

	if (priv->constants.force_preempt_mode && !graphics_preempt_mode &&
		!compute_preempt_mode) {
		graphics_preempt_mode = g->ops.gr.is_valid_gfx_class(g, class) ?
			NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP : 0;
		compute_preempt_mode =
			g->ops.gr.is_valid_compute_class(g, class) ?
			NVGPU_PREEMPTION_MODE_COMPUTE_CTA : 0;
	}

	if (graphics_preempt_mode || compute_preempt_mode) {
		if (g->ops.gr.set_ctxsw_preemption_mode) {
			err = g->ops.gr.set_ctxsw_preemption_mode(g, gr_ctx, vm,
				class, graphics_preempt_mode,
				compute_preempt_mode);
			if (err) {
				nvgpu_err(g,
					"set_ctxsw_preemption_mode failed");
				goto fail;
			}
		} else {
			err = -ENOSYS;
			goto fail;
		}
	}

	gk20a_dbg_fn("done");
	return err;

fail:
	vgpu_gr_gp10b_free_gr_ctx(g, vm, gr_ctx);
	return err;
}

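/*
 * Validate the requested graphics/compute preemption mode combination,
 * allocate the buffers GFXP needs, and bind everything on the RM server
 * with a single TEGRA_VGPU_CMD_CHANNEL_BIND_GR_CTXSW_BUFFERS message.
 */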
int vgpu_gr_gp10b_set_ctxsw_preemption_mode(struct gk20a *g,
				struct gr_ctx_desc *gr_ctx,
				struct vm_gk20a *vm, u32 class,
				u32 graphics_preempt_mode,
				u32 compute_preempt_mode)
{
	struct tegra_vgpu_cmd_msg msg = {};
	struct tegra_vgpu_gr_bind_ctxsw_buffers_params *p =
				&msg.params.gr_bind_ctxsw_buffers;
	int err = 0;

	if (g->ops.gr.is_valid_gfx_class(g, class) &&
			g->gr.t18x.ctx_vars.force_preemption_gfxp)
		graphics_preempt_mode = NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP;

	if (g->ops.gr.is_valid_compute_class(g, class) &&
			g->gr.t18x.ctx_vars.force_preemption_cilp)
		compute_preempt_mode = NVGPU_PREEMPTION_MODE_COMPUTE_CILP;

	/* check for invalid combinations */
	if ((graphics_preempt_mode == 0) && (compute_preempt_mode == 0))
		return -EINVAL;

	if ((graphics_preempt_mode == NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP) &&
		   (compute_preempt_mode == NVGPU_PREEMPTION_MODE_COMPUTE_CILP))
		return -EINVAL;

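	/*
	 * GFXP needs dedicated preempt, spill, pagepool and betacb buffers:
	 * they are allocated in the guest VM below and their GPU VAs are
	 * collected in 'p' for the bind message sent at the end.
	 */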
	/* set preemption modes */
	switch (graphics_preempt_mode) {
	case NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP:
	{
		u32 spill_size =
			gr_gpc0_swdx_rm_spill_buffer_size_256b_default_v() *
			gr_gpc0_swdx_rm_spill_buffer_size_256b_byte_granularity_v();
		u32 pagepool_size = g->ops.gr.pagepool_default_size(g) *
			gr_scc_pagepool_total_pages_byte_granularity_v();
		u32 betacb_size = g->gr.attrib_cb_default_size +
			(gr_gpc0_ppc0_cbm_beta_cb_size_v_gfxp_v() -
			 gr_gpc0_ppc0_cbm_beta_cb_size_v_default_v());
		u32 attrib_cb_size = (betacb_size + g->gr.alpha_cb_size) *
			gr_gpc0_ppc0_cbm_beta_cb_size_v_granularity_v() *
			g->gr.max_tpc_count;
		struct nvgpu_mem *desc;

		attrib_cb_size = ALIGN(attrib_cb_size, 128);

		gk20a_dbg_info("gfxp context preempt size=%d",
			g->gr.t18x.ctx_vars.preempt_image_size);
		gk20a_dbg_info("gfxp context spill size=%d", spill_size);
		gk20a_dbg_info("gfxp context pagepool size=%d", pagepool_size);
		gk20a_dbg_info("gfxp context attrib cb size=%d",
			attrib_cb_size);

		err = gr_gp10b_alloc_buffer(vm,
				g->gr.t18x.ctx_vars.preempt_image_size,
				&gr_ctx->t18x.preempt_ctxsw_buffer);
		if (err) {
			err = -ENOMEM;
			goto fail;
		}
		desc = &gr_ctx->t18x.preempt_ctxsw_buffer;
		p->gpu_va[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_MAIN] = desc->gpu_va;
		p->size[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_MAIN] = desc->size;

		err = gr_gp10b_alloc_buffer(vm,
				spill_size,
				&gr_ctx->t18x.spill_ctxsw_buffer);
		if (err) {
			err = -ENOMEM;
			goto fail;
		}
		desc = &gr_ctx->t18x.spill_ctxsw_buffer;
		p->gpu_va[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_SPILL] = desc->gpu_va;
		p->size[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_SPILL] = desc->size;

		err = gr_gp10b_alloc_buffer(vm,
				pagepool_size,
				&gr_ctx->t18x.pagepool_ctxsw_buffer);
		if (err) {
			err = -ENOMEM;
			goto fail;
		}
		desc = &gr_ctx->t18x.pagepool_ctxsw_buffer;
		p->gpu_va[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_PAGEPOOL] =
			desc->gpu_va;
		p->size[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_PAGEPOOL] = desc->size;

		err = gr_gp10b_alloc_buffer(vm,
				attrib_cb_size,
				&gr_ctx->t18x.betacb_ctxsw_buffer);
		if (err) {
			err = -ENOMEM;
			goto fail;
		}
		desc = &gr_ctx->t18x.betacb_ctxsw_buffer;
		p->gpu_va[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_BETACB] =
			desc->gpu_va;
		p->size[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_BETACB] = desc->size;

		gr_ctx->graphics_preempt_mode =
			NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP;
		p->mode = TEGRA_VGPU_GR_CTXSW_PREEMPTION_MODE_GFX_GFXP;
		break;
	}
	case NVGPU_PREEMPTION_MODE_GRAPHICS_WFI:
		gr_ctx->graphics_preempt_mode = graphics_preempt_mode;
		break;

	default:
		break;
	}

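	/*
	 * Compute modes need no extra buffers here; just record the mode
	 * in the context and translate it to the matching TEGRA_VGPU token.
	 */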
	if (g->ops.gr.is_valid_compute_class(g, class)) {
		switch (compute_preempt_mode) {
		case NVGPU_PREEMPTION_MODE_COMPUTE_WFI:
			gr_ctx->compute_preempt_mode =
				NVGPU_PREEMPTION_MODE_COMPUTE_WFI;
			p->mode = TEGRA_VGPU_GR_CTXSW_PREEMPTION_MODE_WFI;
			break;
		case NVGPU_PREEMPTION_MODE_COMPUTE_CTA:
			gr_ctx->compute_preempt_mode =
				NVGPU_PREEMPTION_MODE_COMPUTE_CTA;
			p->mode =
				TEGRA_VGPU_GR_CTXSW_PREEMPTION_MODE_COMPUTE_CTA;
			break;
		case NVGPU_PREEMPTION_MODE_COMPUTE_CILP:
			gr_ctx->compute_preempt_mode =
				NVGPU_PREEMPTION_MODE_COMPUTE_CILP;
			p->mode =
				TEGRA_VGPU_GR_CTXSW_PREEMPTION_MODE_COMPUTE_CILP;
			break;
		default:
			break;
		}
	}

	if (gr_ctx->graphics_preempt_mode || gr_ctx->compute_preempt_mode) {
		msg.cmd = TEGRA_VGPU_CMD_CHANNEL_BIND_GR_CTXSW_BUFFERS;
		msg.handle = vgpu_get_handle(g);
		p->gr_ctx_handle = gr_ctx->virt_ctx;
		err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
		if (err || msg.ret) {
			err = -ENOMEM;
			goto fail;
		}
	}

	return err;

fail:
	nvgpu_err(g, "%s failed %d", __func__, err);
	return err;
}

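/*
 * Per-channel entry point: drop any requested mode that already matches
 * the current context, pick the right VM (TSG or bare channel), and
 * forward the rest to set_ctxsw_preemption_mode.
 */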
int vgpu_gr_gp10b_set_preemption_mode(struct channel_gk20a *ch,
				u32 graphics_preempt_mode,
				u32 compute_preempt_mode)
{
	struct gr_ctx_desc *gr_ctx = ch->ch_ctx.gr_ctx;
	struct gk20a *g = ch->g;
	struct tsg_gk20a *tsg;
	struct vm_gk20a *vm;
	u32 class;
	int err;

	class = ch->obj_class;
	if (!class)
		return -EINVAL;

	/* skip setting anything if both modes are already set */
	if (graphics_preempt_mode &&
	    (graphics_preempt_mode == gr_ctx->graphics_preempt_mode))
		graphics_preempt_mode = 0;

	if (compute_preempt_mode &&
	    (compute_preempt_mode == gr_ctx->compute_preempt_mode))
		compute_preempt_mode = 0;

	if (graphics_preempt_mode == 0 && compute_preempt_mode == 0)
		return 0;

	if (gk20a_is_channel_marked_as_tsg(ch)) {
		tsg = &g->fifo.tsg[ch->tsgid];
		vm = tsg->vm;
	} else {
		vm = ch->vm;
	}

	if (g->ops.gr.set_ctxsw_preemption_mode) {
		err = g->ops.gr.set_ctxsw_preemption_mode(g, gr_ctx, vm, class,
						graphics_preempt_mode,
						compute_preempt_mode);
		if (err) {
			nvgpu_err(g, "set_ctxsw_preemption_mode failed");
			return err;
		}
	} else {
		err = -ENOSYS;
	}

	return err;
}

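/*
 * Query common ctx state via the vgpu path, then pick up the GFXP
 * preempt image size from the server-provided constants.
 */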
int vgpu_gr_gp10b_init_ctx_state(struct gk20a *g)
{
	struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
	int err;

	gk20a_dbg_fn("");

	err = vgpu_gr_init_ctx_state(g);
	if (err)
		return err;

	g->gr.t18x.ctx_vars.preempt_image_size =
			priv->constants.preempt_ctx_size;
	if (!g->gr.t18x.ctx_vars.preempt_image_size)
		return -EINVAL;

	return 0;
}