Diffstat (limited to 'drivers/gpu/nvgpu/common/linux/vgpu/gp10b/vgpu_gr_gp10b.c')
-rw-r--r-- | drivers/gpu/nvgpu/common/linux/vgpu/gp10b/vgpu_gr_gp10b.c | 302
1 file changed, 0 insertions, 302 deletions
diff --git a/drivers/gpu/nvgpu/common/linux/vgpu/gp10b/vgpu_gr_gp10b.c b/drivers/gpu/nvgpu/common/linux/vgpu/gp10b/vgpu_gr_gp10b.c
deleted file mode 100644
index 9adf20d1..00000000
--- a/drivers/gpu/nvgpu/common/linux/vgpu/gp10b/vgpu_gr_gp10b.c
+++ /dev/null
@@ -1,302 +0,0 @@
/*
 * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <nvgpu/kmem.h>
#include <nvgpu/dma.h>
#include <nvgpu/bug.h>

#include "common/linux/vgpu/vgpu.h"
#include "common/linux/vgpu/gm20b/vgpu_gr_gm20b.h"

#include "gp10b/gr_gp10b.h"
#include "vgpu_gr_gp10b.h"

#include <nvgpu/hw/gp10b/hw_gr_gp10b.h>

int vgpu_gr_gp10b_alloc_gr_ctx(struct gk20a *g,
                               struct nvgpu_gr_ctx *gr_ctx,
                               struct vm_gk20a *vm,
                               u32 class,
                               u32 flags)
{
        u32 graphics_preempt_mode = 0;
        u32 compute_preempt_mode = 0;
        struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
        int err;

        gk20a_dbg_fn("");

        err = vgpu_gr_alloc_gr_ctx(g, gr_ctx, vm, class, flags);
        if (err)
                return err;

        if (flags & NVGPU_OBJ_CTX_FLAGS_SUPPORT_GFXP)
                graphics_preempt_mode = NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP;
        if (flags & NVGPU_OBJ_CTX_FLAGS_SUPPORT_CILP)
                compute_preempt_mode = NVGPU_PREEMPTION_MODE_COMPUTE_CILP;

        if (priv->constants.force_preempt_mode && !graphics_preempt_mode &&
                !compute_preempt_mode) {
                graphics_preempt_mode = g->ops.gr.is_valid_gfx_class(g, class) ?
                        NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP : 0;
                compute_preempt_mode =
                        g->ops.gr.is_valid_compute_class(g, class) ?
                        NVGPU_PREEMPTION_MODE_COMPUTE_CTA : 0;
        }

        if (graphics_preempt_mode || compute_preempt_mode) {
                if (g->ops.gr.set_ctxsw_preemption_mode) {
                        err = g->ops.gr.set_ctxsw_preemption_mode(g, gr_ctx, vm,
                                class, graphics_preempt_mode, compute_preempt_mode);
                        if (err) {
                                nvgpu_err(g,
                                        "set_ctxsw_preemption_mode failed");
                                goto fail;
                        }
                } else {
                        err = -ENOSYS;
                        goto fail;
                }
        }

        gk20a_dbg_fn("done");
        return err;

fail:
        vgpu_gr_free_gr_ctx(g, vm, gr_ctx);
        return err;
}

int vgpu_gr_gp10b_set_ctxsw_preemption_mode(struct gk20a *g,
                                struct nvgpu_gr_ctx *gr_ctx,
                                struct vm_gk20a *vm, u32 class,
                                u32 graphics_preempt_mode,
                                u32 compute_preempt_mode)
{
        struct tegra_vgpu_cmd_msg msg = {};
        struct tegra_vgpu_gr_bind_ctxsw_buffers_params *p =
                                &msg.params.gr_bind_ctxsw_buffers;
        int err = 0;

        if (g->ops.gr.is_valid_gfx_class(g, class) &&
                        g->gr.ctx_vars.force_preemption_gfxp)
                graphics_preempt_mode = NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP;

        if (g->ops.gr.is_valid_compute_class(g, class) &&
                        g->gr.ctx_vars.force_preemption_cilp)
                compute_preempt_mode = NVGPU_PREEMPTION_MODE_COMPUTE_CILP;

        /* check for invalid combinations */
        if ((graphics_preempt_mode == 0) && (compute_preempt_mode == 0))
                return -EINVAL;

        if ((graphics_preempt_mode == NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP) &&
                (compute_preempt_mode == NVGPU_PREEMPTION_MODE_COMPUTE_CILP))
                return -EINVAL;

        /* set preemption modes */
        switch (graphics_preempt_mode) {
        case NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP:
        {
                u32 spill_size =
                        gr_gpc0_swdx_rm_spill_buffer_size_256b_default_v() *
                        gr_gpc0_swdx_rm_spill_buffer_size_256b_byte_granularity_v();
                u32 pagepool_size = g->ops.gr.pagepool_default_size(g) *
                        gr_scc_pagepool_total_pages_byte_granularity_v();
                u32 betacb_size = g->gr.attrib_cb_default_size +
                        (gr_gpc0_ppc0_cbm_beta_cb_size_v_gfxp_v() -
                         gr_gpc0_ppc0_cbm_beta_cb_size_v_default_v());
                u32 attrib_cb_size = (betacb_size + g->gr.alpha_cb_size) *
                        gr_gpc0_ppc0_cbm_beta_cb_size_v_granularity_v() *
                        g->gr.max_tpc_count;
                struct nvgpu_mem *desc;

                attrib_cb_size = ALIGN(attrib_cb_size, 128);

                gk20a_dbg_info("gfxp context preempt size=%d",
                        g->gr.ctx_vars.preempt_image_size);
                gk20a_dbg_info("gfxp context spill size=%d", spill_size);
                gk20a_dbg_info("gfxp context pagepool size=%d", pagepool_size);
                gk20a_dbg_info("gfxp context attrib cb size=%d",
                        attrib_cb_size);

                err = gr_gp10b_alloc_buffer(vm,
                                g->gr.ctx_vars.preempt_image_size,
                                &gr_ctx->preempt_ctxsw_buffer);
                if (err) {
                        err = -ENOMEM;
                        goto fail;
                }
                desc = &gr_ctx->preempt_ctxsw_buffer;
                p->gpu_va[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_MAIN] = desc->gpu_va;
                p->size[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_MAIN] = desc->size;

                err = gr_gp10b_alloc_buffer(vm,
                                spill_size,
                                &gr_ctx->spill_ctxsw_buffer);
                if (err) {
                        err = -ENOMEM;
                        goto fail;
                }
                desc = &gr_ctx->spill_ctxsw_buffer;
                p->gpu_va[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_SPILL] = desc->gpu_va;
                p->size[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_SPILL] = desc->size;

                err = gr_gp10b_alloc_buffer(vm,
                                pagepool_size,
                                &gr_ctx->pagepool_ctxsw_buffer);
                if (err) {
                        err = -ENOMEM;
                        goto fail;
                }
                desc = &gr_ctx->pagepool_ctxsw_buffer;
                p->gpu_va[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_PAGEPOOL] =
                        desc->gpu_va;
                p->size[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_PAGEPOOL] = desc->size;

                err = gr_gp10b_alloc_buffer(vm,
                                attrib_cb_size,
                                &gr_ctx->betacb_ctxsw_buffer);
                if (err) {
                        err = -ENOMEM;
                        goto fail;
                }
                desc = &gr_ctx->betacb_ctxsw_buffer;
                p->gpu_va[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_BETACB] =
                        desc->gpu_va;
                p->size[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_BETACB] = desc->size;

                gr_ctx->graphics_preempt_mode = NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP;
                p->mode = TEGRA_VGPU_GR_CTXSW_PREEMPTION_MODE_GFX_GFXP;
                break;
        }
        case NVGPU_PREEMPTION_MODE_GRAPHICS_WFI:
                gr_ctx->graphics_preempt_mode = graphics_preempt_mode;
                break;

        default:
                break;
        }

        if (g->ops.gr.is_valid_compute_class(g, class)) {
                switch (compute_preempt_mode) {
                case NVGPU_PREEMPTION_MODE_COMPUTE_WFI:
                        gr_ctx->compute_preempt_mode =
                                NVGPU_PREEMPTION_MODE_COMPUTE_WFI;
                        p->mode = TEGRA_VGPU_GR_CTXSW_PREEMPTION_MODE_WFI;
                        break;
                case NVGPU_PREEMPTION_MODE_COMPUTE_CTA:
                        gr_ctx->compute_preempt_mode =
                                NVGPU_PREEMPTION_MODE_COMPUTE_CTA;
                        p->mode =
                                TEGRA_VGPU_GR_CTXSW_PREEMPTION_MODE_COMPUTE_CTA;
                        break;
                case NVGPU_PREEMPTION_MODE_COMPUTE_CILP:
                        gr_ctx->compute_preempt_mode =
                                NVGPU_PREEMPTION_MODE_COMPUTE_CILP;
                        p->mode =
                                TEGRA_VGPU_GR_CTXSW_PREEMPTION_MODE_COMPUTE_CILP;
                        break;
                default:
                        break;
                }
        }

        if (gr_ctx->graphics_preempt_mode || gr_ctx->compute_preempt_mode) {
                msg.cmd = TEGRA_VGPU_CMD_CHANNEL_BIND_GR_CTXSW_BUFFERS;
                msg.handle = vgpu_get_handle(g);
                p->gr_ctx_handle = gr_ctx->virt_ctx;
                err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
                if (err || msg.ret) {
                        err = -ENOMEM;
                        goto fail;
                }
        }

        return err;

fail:
        nvgpu_err(g, "%s failed %d", __func__, err);
        return err;
}

int vgpu_gr_gp10b_set_preemption_mode(struct channel_gk20a *ch,
                                u32 graphics_preempt_mode,
                                u32 compute_preempt_mode)
{
        struct nvgpu_gr_ctx *gr_ctx;
        struct gk20a *g = ch->g;
        struct tsg_gk20a *tsg;
        struct vm_gk20a *vm;
        u32 class;
        int err;

        class = ch->obj_class;
        if (!class)
                return -EINVAL;

        tsg = tsg_gk20a_from_ch(ch);
        if (!tsg)
                return -EINVAL;

        vm = tsg->vm;
        gr_ctx = &tsg->gr_ctx;

        /* skip setting anything if both modes are already set */
        if (graphics_preempt_mode &&
            (graphics_preempt_mode == gr_ctx->graphics_preempt_mode))
                graphics_preempt_mode = 0;

        if (compute_preempt_mode &&
            (compute_preempt_mode == gr_ctx->compute_preempt_mode))
                compute_preempt_mode = 0;

        if (graphics_preempt_mode == 0 && compute_preempt_mode == 0)
                return 0;

        if (g->ops.gr.set_ctxsw_preemption_mode) {
                err = g->ops.gr.set_ctxsw_preemption_mode(g, gr_ctx, vm, class,
                                                graphics_preempt_mode,
                                                compute_preempt_mode);
                if (err) {
                        nvgpu_err(g, "set_ctxsw_preemption_mode failed");
                        return err;
                }
        } else {
                err = -ENOSYS;
        }

        return err;
}

int vgpu_gr_gp10b_init_ctx_state(struct gk20a *g)
{
        struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
        int err;

        gk20a_dbg_fn("");

        err = vgpu_gr_init_ctx_state(g);
        if (err)
                return err;

        g->gr.ctx_vars.preempt_image_size =
                        priv->constants.preempt_ctx_size;
        if (!g->gr.ctx_vars.preempt_image_size)
                return -EINVAL;

        return 0;
}