path: root/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c
author     Deepak Nibade <dnibade@nvidia.com>                    2017-11-14 09:43:28 -0500
committer  mobile promotions <svcmobile_promotions@nvidia.com>   2017-11-17 11:27:19 -0500
commit     b42fb7ba26b565f93118fbdd9e17b42ee6144c5e (patch)
tree       26e2d919f019d15b51bba4d7b5c938f77ad5cff5 /drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c
parent     b7cc3a2aa6c92a09eed43513287c9062f22ad127 (diff)
gpu: nvgpu: move vgpu code to linux
Most of the VGPU code is Linux specific but lies in common code. So until the
VGPU code is properly abstracted and made OS-independent, move all of the VGPU
code to the Linux specific directory.

Handle corresponding Makefile changes.

Update all #includes to reflect new paths.

Add GPL license to newly added linux files.

Jira NVGPU-387

Change-Id: Ic133e4c80e570bcc273f0dacf45283fefd678923
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1599472
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
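As a minimal sketch of the "Update all #includes" step, a file that consumed the
VGPU headers from the old common location, for example:

    #include "vgpu/vgpu.h"                /* old path under drivers/gpu/nvgpu/vgpu/ */

would, after the move, pull them from the Linux specific tree instead, along the
lines of:

    #include "common/linux/vgpu/vgpu.h"   /* assumed new Linux specific path, for illustration only */

The exact new directory name is an assumption made here for illustration; the
authoritative paths are those in the full change at the Reviewed-on link above.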
Diffstat (limited to 'drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c')
-rw-r--r--  drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c  338
1 file changed, 0 insertions, 338 deletions
diff --git a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c
deleted file mode 100644
index 8a5130f6..00000000
--- a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c
+++ /dev/null
@@ -1,338 +0,0 @@
/*
 * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <nvgpu/kmem.h>
#include <nvgpu/dma.h>
#include <nvgpu/bug.h>

#include "vgpu/vgpu.h"
#include "vgpu/gm20b/vgpu_gr_gm20b.h"

#include "vgpu_gr_gp10b.h"

#include <nvgpu/hw/gp10b/hw_gr_gp10b.h>

void vgpu_gr_gp10b_free_gr_ctx(struct gk20a *g, struct vm_gk20a *vm,
		struct gr_ctx_desc *gr_ctx)
{
	struct tegra_vgpu_cmd_msg msg = {0};
	struct tegra_vgpu_gr_ctx_params *p = &msg.params.gr_ctx;
	int err;

	gk20a_dbg_fn("");

	if (!gr_ctx || !gr_ctx->mem.gpu_va)
		return;

	msg.cmd = TEGRA_VGPU_CMD_GR_CTX_FREE;
	msg.handle = vgpu_get_handle(g);
	p->gr_ctx_handle = gr_ctx->virt_ctx;
	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
	WARN_ON(err || msg.ret);

	__nvgpu_vm_free_va(vm, gr_ctx->mem.gpu_va, gmmu_page_size_kernel);

	nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.pagepool_ctxsw_buffer);
	nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.betacb_ctxsw_buffer);
	nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.spill_ctxsw_buffer);
	nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.preempt_ctxsw_buffer);

	nvgpu_kfree(g, gr_ctx);
}

int vgpu_gr_gp10b_alloc_gr_ctx(struct gk20a *g,
			       struct gr_ctx_desc **__gr_ctx,
			       struct vm_gk20a *vm,
			       u32 class,
			       u32 flags)
{
	struct gr_ctx_desc *gr_ctx;
	u32 graphics_preempt_mode = 0;
	u32 compute_preempt_mode = 0;
	struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
	int err;

	gk20a_dbg_fn("");

	err = vgpu_gr_alloc_gr_ctx(g, __gr_ctx, vm, class, flags);
	if (err)
		return err;

	gr_ctx = *__gr_ctx;

	if (flags & NVGPU_OBJ_CTX_FLAGS_SUPPORT_GFXP)
		graphics_preempt_mode = NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP;
	if (flags & NVGPU_OBJ_CTX_FLAGS_SUPPORT_CILP)
		compute_preempt_mode = NVGPU_PREEMPTION_MODE_COMPUTE_CILP;

	if (priv->constants.force_preempt_mode && !graphics_preempt_mode &&
		!compute_preempt_mode) {
		graphics_preempt_mode = g->ops.gr.is_valid_gfx_class(g, class) ?
					NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP : 0;
		compute_preempt_mode =
			g->ops.gr.is_valid_compute_class(g, class) ?
			NVGPU_PREEMPTION_MODE_COMPUTE_CTA : 0;
	}

	if (graphics_preempt_mode || compute_preempt_mode) {
		if (g->ops.gr.set_ctxsw_preemption_mode) {
			err = g->ops.gr.set_ctxsw_preemption_mode(g, gr_ctx, vm,
				class, graphics_preempt_mode, compute_preempt_mode);
			if (err) {
				nvgpu_err(g,
					"set_ctxsw_preemption_mode failed");
				goto fail;
			}
		} else {
			err = -ENOSYS;
			goto fail;
		}
	}

	gk20a_dbg_fn("done");
	return err;

fail:
	vgpu_gr_gp10b_free_gr_ctx(g, vm, gr_ctx);
	return err;
}

int vgpu_gr_gp10b_set_ctxsw_preemption_mode(struct gk20a *g,
				struct gr_ctx_desc *gr_ctx,
				struct vm_gk20a *vm, u32 class,
				u32 graphics_preempt_mode,
				u32 compute_preempt_mode)
{
	struct tegra_vgpu_cmd_msg msg = {};
	struct tegra_vgpu_gr_bind_ctxsw_buffers_params *p =
				&msg.params.gr_bind_ctxsw_buffers;
	int err = 0;

	if (g->ops.gr.is_valid_gfx_class(g, class) &&
			g->gr.t18x.ctx_vars.force_preemption_gfxp)
		graphics_preempt_mode = NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP;

	if (g->ops.gr.is_valid_compute_class(g, class) &&
			g->gr.t18x.ctx_vars.force_preemption_cilp)
		compute_preempt_mode = NVGPU_PREEMPTION_MODE_COMPUTE_CILP;

	/* check for invalid combinations */
	if ((graphics_preempt_mode == 0) && (compute_preempt_mode == 0))
		return -EINVAL;

	if ((graphics_preempt_mode == NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP) &&
		   (compute_preempt_mode == NVGPU_PREEMPTION_MODE_COMPUTE_CILP))
		return -EINVAL;

	/* set preemption modes */
	switch (graphics_preempt_mode) {
	case NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP:
	{
		u32 spill_size =
			gr_gpc0_swdx_rm_spill_buffer_size_256b_default_v() *
			gr_gpc0_swdx_rm_spill_buffer_size_256b_byte_granularity_v();
		u32 pagepool_size = g->ops.gr.pagepool_default_size(g) *
			gr_scc_pagepool_total_pages_byte_granularity_v();
		u32 betacb_size = g->gr.attrib_cb_default_size +
				  (gr_gpc0_ppc0_cbm_beta_cb_size_v_gfxp_v() -
				   gr_gpc0_ppc0_cbm_beta_cb_size_v_default_v());
		u32 attrib_cb_size = (betacb_size + g->gr.alpha_cb_size) *
				  gr_gpc0_ppc0_cbm_beta_cb_size_v_granularity_v() *
				  g->gr.max_tpc_count;
		struct nvgpu_mem *desc;

		attrib_cb_size = ALIGN(attrib_cb_size, 128);

		gk20a_dbg_info("gfxp context preempt size=%d",
			g->gr.t18x.ctx_vars.preempt_image_size);
		gk20a_dbg_info("gfxp context spill size=%d", spill_size);
		gk20a_dbg_info("gfxp context pagepool size=%d", pagepool_size);
		gk20a_dbg_info("gfxp context attrib cb size=%d",
			attrib_cb_size);

		err = gr_gp10b_alloc_buffer(vm,
					g->gr.t18x.ctx_vars.preempt_image_size,
					&gr_ctx->t18x.preempt_ctxsw_buffer);
		if (err) {
			err = -ENOMEM;
			goto fail;
		}
		desc = &gr_ctx->t18x.preempt_ctxsw_buffer;
		p->gpu_va[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_MAIN] = desc->gpu_va;
		p->size[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_MAIN] = desc->size;

		err = gr_gp10b_alloc_buffer(vm,
					spill_size,
					&gr_ctx->t18x.spill_ctxsw_buffer);
		if (err) {
			err = -ENOMEM;
			goto fail;
		}
		desc = &gr_ctx->t18x.spill_ctxsw_buffer;
		p->gpu_va[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_SPILL] = desc->gpu_va;
		p->size[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_SPILL] = desc->size;

		err = gr_gp10b_alloc_buffer(vm,
					pagepool_size,
					&gr_ctx->t18x.pagepool_ctxsw_buffer);
		if (err) {
			err = -ENOMEM;
			goto fail;
		}
		desc = &gr_ctx->t18x.pagepool_ctxsw_buffer;
		p->gpu_va[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_PAGEPOOL] =
			desc->gpu_va;
		p->size[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_PAGEPOOL] = desc->size;

		err = gr_gp10b_alloc_buffer(vm,
					attrib_cb_size,
					&gr_ctx->t18x.betacb_ctxsw_buffer);
		if (err) {
			err = -ENOMEM;
			goto fail;
		}
		desc = &gr_ctx->t18x.betacb_ctxsw_buffer;
		p->gpu_va[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_BETACB] =
			desc->gpu_va;
		p->size[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_BETACB] = desc->size;

		gr_ctx->graphics_preempt_mode = NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP;
		p->mode = TEGRA_VGPU_GR_CTXSW_PREEMPTION_MODE_GFX_GFXP;
		break;
	}
	case NVGPU_PREEMPTION_MODE_GRAPHICS_WFI:
		gr_ctx->graphics_preempt_mode = graphics_preempt_mode;
		break;

	default:
		break;
	}

	if (g->ops.gr.is_valid_compute_class(g, class)) {
		switch (compute_preempt_mode) {
		case NVGPU_PREEMPTION_MODE_COMPUTE_WFI:
			gr_ctx->compute_preempt_mode =
				NVGPU_PREEMPTION_MODE_COMPUTE_WFI;
			p->mode = TEGRA_VGPU_GR_CTXSW_PREEMPTION_MODE_WFI;
			break;
		case NVGPU_PREEMPTION_MODE_COMPUTE_CTA:
			gr_ctx->compute_preempt_mode =
				NVGPU_PREEMPTION_MODE_COMPUTE_CTA;
			p->mode =
				TEGRA_VGPU_GR_CTXSW_PREEMPTION_MODE_COMPUTE_CTA;
			break;
		case NVGPU_PREEMPTION_MODE_COMPUTE_CILP:
			gr_ctx->compute_preempt_mode =
				NVGPU_PREEMPTION_MODE_COMPUTE_CILP;
			p->mode =
				TEGRA_VGPU_GR_CTXSW_PREEMPTION_MODE_COMPUTE_CILP;
			break;
		default:
			break;
		}
	}

	if (gr_ctx->graphics_preempt_mode || gr_ctx->compute_preempt_mode) {
		msg.cmd = TEGRA_VGPU_CMD_CHANNEL_BIND_GR_CTXSW_BUFFERS;
		msg.handle = vgpu_get_handle(g);
		p->gr_ctx_handle = gr_ctx->virt_ctx;
		err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
		if (err || msg.ret) {
			err = -ENOMEM;
			goto fail;
		}
	}

	return err;

fail:
	nvgpu_err(g, "%s failed %d", __func__, err);
	return err;
}

int vgpu_gr_gp10b_set_preemption_mode(struct channel_gk20a *ch,
				      u32 graphics_preempt_mode,
				      u32 compute_preempt_mode)
{
	struct gr_ctx_desc *gr_ctx = ch->ch_ctx.gr_ctx;
	struct gk20a *g = ch->g;
	struct tsg_gk20a *tsg;
	struct vm_gk20a *vm;
	u32 class;
	int err;

	class = ch->obj_class;
	if (!class)
		return -EINVAL;

	/* skip setting anything if both modes are already set */
	if (graphics_preempt_mode &&
	    (graphics_preempt_mode == gr_ctx->graphics_preempt_mode))
		graphics_preempt_mode = 0;

	if (compute_preempt_mode &&
	    (compute_preempt_mode == gr_ctx->compute_preempt_mode))
		compute_preempt_mode = 0;

	if (graphics_preempt_mode == 0 && compute_preempt_mode == 0)
		return 0;

	if (gk20a_is_channel_marked_as_tsg(ch)) {
		tsg = &g->fifo.tsg[ch->tsgid];
		vm = tsg->vm;
	} else {
		vm = ch->vm;
	}

	if (g->ops.gr.set_ctxsw_preemption_mode) {
		err = g->ops.gr.set_ctxsw_preemption_mode(g, gr_ctx, vm, class,
						graphics_preempt_mode,
						compute_preempt_mode);
		if (err) {
			nvgpu_err(g, "set_ctxsw_preemption_mode failed");
			return err;
		}
	} else {
		err = -ENOSYS;
	}

	return err;
}

int vgpu_gr_gp10b_init_ctx_state(struct gk20a *g)
{
	struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
	int err;

	gk20a_dbg_fn("");

	err = vgpu_gr_init_ctx_state(g);
	if (err)
		return err;

	g->gr.t18x.ctx_vars.preempt_image_size =
			priv->constants.preempt_ctx_size;
	if (!g->gr.t18x.ctx_vars.preempt_image_size)
		return -EINVAL;

	return 0;
}