summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c
diff options
context:
space:
mode:
authorRichard Zhao <rizhao@nvidia.com>2018-01-30 02:24:37 -0500
committermobile promotions <svcmobile_promotions@nvidia.com>2018-02-27 17:30:52 -0500
commit6393eddfa996fba03464f897b85aa5ec79860fed (patch)
tree557ebe9be93e2b0464118e7d8ec019d9d5dbae5f /drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c
parent7932568b7fe9e16b2b83bc58b2b3686c0d5e52d4 (diff)
gpu: nvgpu: vgpu: move common files out of linux folder
Most files have been moved out of the linux folder. More code could become common as HAL-ification goes on. Jira EVLR-2364 Change-Id: Ia9dbdbc82f45ceefe5c788eac7517000cd455d5e Signed-off-by: Richard Zhao <rizhao@nvidia.com> Reviewed-on: https://git-master.nvidia.com/r/1649947 Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com> Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c')
-rw-r--r--drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c308
1 files changed, 308 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c
new file mode 100644
index 00000000..ab35dc67
--- /dev/null
+++ b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c
@@ -0,0 +1,308 @@
1/*
2 * Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#include <nvgpu/kmem.h>
24#include <nvgpu/dma.h>
25#include <nvgpu/bug.h>
26#include <nvgpu/vgpu/vgpu.h>
27
28#include "vgpu/gm20b/vgpu_gr_gm20b.h"
29
30#include "gp10b/gr_gp10b.h"
31#include "vgpu_gr_gp10b.h"
32
33#include <nvgpu/hw/gp10b/hw_gr_gp10b.h>
34
35int vgpu_gr_gp10b_alloc_gr_ctx(struct gk20a *g,
36 struct nvgpu_gr_ctx *gr_ctx,
37 struct vm_gk20a *vm,
38 u32 class,
39 u32 flags)
40{
41 u32 graphics_preempt_mode = 0;
42 u32 compute_preempt_mode = 0;
43 struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
44 int err;
45
46 gk20a_dbg_fn("");
47
48 err = vgpu_gr_alloc_gr_ctx(g, gr_ctx, vm, class, flags);
49 if (err)
50 return err;
51
52 if (flags & NVGPU_OBJ_CTX_FLAGS_SUPPORT_GFXP)
53 graphics_preempt_mode = NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP;
54 if (flags & NVGPU_OBJ_CTX_FLAGS_SUPPORT_CILP)
55 compute_preempt_mode = NVGPU_PREEMPTION_MODE_COMPUTE_CILP;
56
57 if (priv->constants.force_preempt_mode && !graphics_preempt_mode &&
58 !compute_preempt_mode) {
59 graphics_preempt_mode = g->ops.gr.is_valid_gfx_class(g, class) ?
60 NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP : 0;
61 compute_preempt_mode =
62 g->ops.gr.is_valid_compute_class(g, class) ?
63 NVGPU_PREEMPTION_MODE_COMPUTE_CTA : 0;
64 }
65
66 if (graphics_preempt_mode || compute_preempt_mode) {
67 if (g->ops.gr.set_ctxsw_preemption_mode) {
68 err = g->ops.gr.set_ctxsw_preemption_mode(g, gr_ctx, vm,
69 class, graphics_preempt_mode, compute_preempt_mode);
70 if (err) {
71 nvgpu_err(g,
72 "set_ctxsw_preemption_mode failed");
73 goto fail;
74 }
75 } else {
76 err = -ENOSYS;
77 goto fail;
78 }
79 }
80
81 gk20a_dbg_fn("done");
82 return err;
83
84fail:
85 vgpu_gr_free_gr_ctx(g, vm, gr_ctx);
86 return err;
87}
88
/*
 * Configure graphics/compute ctxsw preemption modes for a vGPU gr context.
 *
 * For GFXP graphics preemption this allocates the four ctxsw buffers
 * (main/preempt, spill, pagepool, betacb) in @vm and records their GPU VAs
 * and sizes in the RPC params; the buffers are then bound on the server
 * side via a single TEGRA_VGPU_CMD_CHANNEL_BIND_GR_CTXSW_BUFFERS message.
 *
 * Returns 0 on success, -EINVAL for an invalid mode combination, or a
 * negative errno on allocation/RPC failure.
 *
 * NOTE(review): on a failure partway through, buffers already allocated
 * into gr_ctx are NOT freed here — presumably they are released when the
 * gr context itself is freed; confirm vgpu_gr_free_gr_ctx covers them.
 */
int vgpu_gr_gp10b_set_ctxsw_preemption_mode(struct gk20a *g,
				struct nvgpu_gr_ctx *gr_ctx,
				struct vm_gk20a *vm, u32 class,
				u32 graphics_preempt_mode,
				u32 compute_preempt_mode)
{
	struct tegra_vgpu_cmd_msg msg = {};
	struct tegra_vgpu_gr_bind_ctxsw_buffers_params *p =
				&msg.params.gr_bind_ctxsw_buffers;
	int err = 0;

	/* Debug/override knobs can force a mode regardless of the request. */
	if (g->ops.gr.is_valid_gfx_class(g, class) &&
			g->gr.ctx_vars.force_preemption_gfxp)
		graphics_preempt_mode = NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP;

	if (g->ops.gr.is_valid_compute_class(g, class) &&
			g->gr.ctx_vars.force_preemption_cilp)
		compute_preempt_mode = NVGPU_PREEMPTION_MODE_COMPUTE_CILP;

	/* check for invalid combinations */
	if ((graphics_preempt_mode == 0) && (compute_preempt_mode == 0))
		return -EINVAL;

	/* GFXP graphics preemption and CILP compute preemption exclude
	 * each other. */
	if ((graphics_preempt_mode == NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP) &&
		   (compute_preempt_mode == NVGPU_PREEMPTION_MODE_COMPUTE_CILP))
		return -EINVAL;

	/* set preemption modes */
	switch (graphics_preempt_mode) {
	case NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP:
	{
		/* Buffer sizes derived from HW-defined granularities and
		 * the current gr configuration. */
		u32 spill_size =
			gr_gpc0_swdx_rm_spill_buffer_size_256b_default_v() *
			gr_gpc0_swdx_rm_spill_buffer_size_256b_byte_granularity_v();
		u32 pagepool_size = g->ops.gr.pagepool_default_size(g) *
			gr_scc_pagepool_total_pages_byte_granularity_v();
		u32 betacb_size = g->gr.attrib_cb_default_size +
				  (gr_gpc0_ppc0_cbm_beta_cb_size_v_gfxp_v() -
				   gr_gpc0_ppc0_cbm_beta_cb_size_v_default_v());
		u32 attrib_cb_size = (betacb_size + g->gr.alpha_cb_size) *
				  gr_gpc0_ppc0_cbm_beta_cb_size_v_granularity_v() *
				  g->gr.max_tpc_count;
		struct nvgpu_mem *desc;

		attrib_cb_size = ALIGN(attrib_cb_size, 128);

		gk20a_dbg_info("gfxp context preempt size=%d",
			g->gr.ctx_vars.preempt_image_size);
		gk20a_dbg_info("gfxp context spill size=%d", spill_size);
		gk20a_dbg_info("gfxp context pagepool size=%d", pagepool_size);
		gk20a_dbg_info("gfxp context attrib cb size=%d",
			attrib_cb_size);

		/* Main preempt (context image) buffer. */
		err = gr_gp10b_alloc_buffer(vm,
					g->gr.ctx_vars.preempt_image_size,
					&gr_ctx->preempt_ctxsw_buffer);
		if (err) {
			/* NOTE(review): the original errno from the alloc is
			 * replaced by -ENOMEM on all four paths below. */
			err = -ENOMEM;
			goto fail;
		}
		desc = &gr_ctx->preempt_ctxsw_buffer;
		p->gpu_va[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_MAIN] = desc->gpu_va;
		p->size[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_MAIN] = desc->size;

		/* Spill buffer. */
		err = gr_gp10b_alloc_buffer(vm,
					spill_size,
					&gr_ctx->spill_ctxsw_buffer);
		if (err) {
			err = -ENOMEM;
			goto fail;
		}
		desc = &gr_ctx->spill_ctxsw_buffer;
		p->gpu_va[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_SPILL] = desc->gpu_va;
		p->size[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_SPILL] = desc->size;

		/* Pagepool buffer. */
		err = gr_gp10b_alloc_buffer(vm,
					pagepool_size,
					&gr_ctx->pagepool_ctxsw_buffer);
		if (err) {
			err = -ENOMEM;
			goto fail;
		}
		desc = &gr_ctx->pagepool_ctxsw_buffer;
		p->gpu_va[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_PAGEPOOL] =
			desc->gpu_va;
		p->size[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_PAGEPOOL] = desc->size;

		/* Beta circular buffer (attribute cb). */
		err = gr_gp10b_alloc_buffer(vm,
					attrib_cb_size,
					&gr_ctx->betacb_ctxsw_buffer);
		if (err) {
			err = -ENOMEM;
			goto fail;
		}
		desc = &gr_ctx->betacb_ctxsw_buffer;
		p->gpu_va[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_BETACB] =
			desc->gpu_va;
		p->size[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_BETACB] = desc->size;

		gr_ctx->graphics_preempt_mode = NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP;
		p->mode = TEGRA_VGPU_GR_CTXSW_PREEMPTION_MODE_GFX_GFXP;
		break;
	}
	case NVGPU_PREEMPTION_MODE_GRAPHICS_WFI:
		gr_ctx->graphics_preempt_mode = graphics_preempt_mode;
		break;

	default:
		/* Unknown graphics mode: leave gr_ctx unchanged. */
		break;
	}

	/* Compute mode only applies to compute-capable classes.
	 * NOTE(review): p->mode set here overwrites any graphics mode
	 * chosen above — presumably the server derives the graphics mode
	 * from the bound buffers; confirm against the vgpu protocol. */
	if (g->ops.gr.is_valid_compute_class(g, class)) {
		switch (compute_preempt_mode) {
		case NVGPU_PREEMPTION_MODE_COMPUTE_WFI:
			gr_ctx->compute_preempt_mode =
				NVGPU_PREEMPTION_MODE_COMPUTE_WFI;
			p->mode = TEGRA_VGPU_GR_CTXSW_PREEMPTION_MODE_WFI;
			break;
		case NVGPU_PREEMPTION_MODE_COMPUTE_CTA:
			gr_ctx->compute_preempt_mode =
				NVGPU_PREEMPTION_MODE_COMPUTE_CTA;
			p->mode =
				TEGRA_VGPU_GR_CTXSW_PREEMPTION_MODE_COMPUTE_CTA;
			break;
		case NVGPU_PREEMPTION_MODE_COMPUTE_CILP:
			gr_ctx->compute_preempt_mode =
				NVGPU_PREEMPTION_MODE_COMPUTE_CILP;
			p->mode =
				TEGRA_VGPU_GR_CTXSW_PREEMPTION_MODE_COMPUTE_CILP;
			break;
		default:
			break;
		}
	}

	/* Push the buffers/mode to the vgpu server in one RPC. */
	if (gr_ctx->graphics_preempt_mode || gr_ctx->compute_preempt_mode) {
		msg.cmd = TEGRA_VGPU_CMD_CHANNEL_BIND_GR_CTXSW_BUFFERS;
		msg.handle = vgpu_get_handle(g);
		p->gr_ctx_handle = gr_ctx->virt_ctx;
		err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
		if (err || msg.ret) {
			/* Treat transport or server-side failure as -ENOMEM. */
			err = -ENOMEM;
			goto fail;
		}
	}

	return err;

fail:
	nvgpu_err(g, "%s failed %d", __func__, err);
	return err;
}
241
242int vgpu_gr_gp10b_set_preemption_mode(struct channel_gk20a *ch,
243 u32 graphics_preempt_mode,
244 u32 compute_preempt_mode)
245{
246 struct nvgpu_gr_ctx *gr_ctx;
247 struct gk20a *g = ch->g;
248 struct tsg_gk20a *tsg;
249 struct vm_gk20a *vm;
250 u32 class;
251 int err;
252
253 class = ch->obj_class;
254 if (!class)
255 return -EINVAL;
256
257 tsg = tsg_gk20a_from_ch(ch);
258 if (!tsg)
259 return -EINVAL;
260
261 vm = tsg->vm;
262 gr_ctx = &tsg->gr_ctx;
263
264 /* skip setting anything if both modes are already set */
265 if (graphics_preempt_mode &&
266 (graphics_preempt_mode == gr_ctx->graphics_preempt_mode))
267 graphics_preempt_mode = 0;
268
269 if (compute_preempt_mode &&
270 (compute_preempt_mode == gr_ctx->compute_preempt_mode))
271 compute_preempt_mode = 0;
272
273 if (graphics_preempt_mode == 0 && compute_preempt_mode == 0)
274 return 0;
275
276 if (g->ops.gr.set_ctxsw_preemption_mode) {
277 err = g->ops.gr.set_ctxsw_preemption_mode(g, gr_ctx, vm, class,
278 graphics_preempt_mode,
279 compute_preempt_mode);
280 if (err) {
281 nvgpu_err(g, "set_ctxsw_preemption_mode failed");
282 return err;
283 }
284 } else {
285 err = -ENOSYS;
286 }
287
288 return err;
289}
290
291int vgpu_gr_gp10b_init_ctx_state(struct gk20a *g)
292{
293 struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
294 int err;
295
296 gk20a_dbg_fn("");
297
298 err = vgpu_gr_init_ctx_state(g);
299 if (err)
300 return err;
301
302 g->gr.ctx_vars.preempt_image_size =
303 priv->constants.preempt_ctx_size;
304 if (!g->gr.ctx_vars.preempt_image_size)
305 return -EINVAL;
306
307 return 0;
308}