author	Deepak Nibade <dnibade@nvidia.com>	2017-11-14 09:43:28 -0500
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-11-17 11:27:19 -0500
commit	b42fb7ba26b565f93118fbdd9e17b42ee6144c5e (patch)
tree	26e2d919f019d15b51bba4d7b5c938f77ad5cff5 /drivers/gpu/nvgpu/vgpu/gp10b
parent	b7cc3a2aa6c92a09eed43513287c9062f22ad127 (diff)
gpu: nvgpu: move vgpu code to linux
Most of the VGPU code is Linux-specific but lies in common code. So, until
the VGPU code is properly abstracted and made OS-independent, move all of
the VGPU code to the Linux-specific directory.

Handle the corresponding Makefile changes.

Update all #includes to reflect the new paths.

Add a GPL license to the newly added Linux files.

Jira NVGPU-387

Change-Id: Ic133e4c80e570bcc273f0dacf45283fefd678923
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1599472
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
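As a concrete sketch of the #include rewrites this change implies (the
destination directory common/linux/vgpu/ and the header name below are
assumptions for illustration, not paths taken from this diff):

	/* Before the move: the vgpu header resolves from common code. */
	#include "vgpu/vgpu.h"

	/* After the move: the same header, assumed to now live under the
	 * Linux-specific directory.
	 */
	#include "common/linux/vgpu/vgpu.h"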
Diffstat (limited to 'drivers/gpu/nvgpu/vgpu/gp10b')
-rw-r--r--	drivers/gpu/nvgpu/vgpu/gp10b/vgpu_fifo_gp10b.c	|  30
-rw-r--r--	drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c	| 338
-rw-r--r--	drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.h	|  45
-rw-r--r--	drivers/gpu/nvgpu/vgpu/gp10b/vgpu_hal_gp10b.c	| 630
-rw-r--r--	drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c	| 203
-rw-r--r--	drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.h	|  45
6 files changed, 0 insertions, 1291 deletions
diff --git a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_fifo_gp10b.c b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_fifo_gp10b.c
deleted file mode 100644
index 4348db8e..00000000
--- a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_fifo_gp10b.c
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include "vgpu_fifo_gp10b.h"
-
-void vgpu_gp10b_init_fifo_ops(struct gpu_ops *gops)
-{
-	/* syncpoint protection not supported yet */
-	gops->fifo.resetup_ramfc = NULL;
-	gops->fifo.reschedule_runlist = NULL;
-}
diff --git a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c
deleted file mode 100644
index 8a5130f6..00000000
--- a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c
+++ /dev/null
@@ -1,338 +0,0 @@
-/*
- * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include <nvgpu/kmem.h>
-#include <nvgpu/dma.h>
-#include <nvgpu/bug.h>
-
-#include "vgpu/vgpu.h"
-#include "vgpu/gm20b/vgpu_gr_gm20b.h"
-
-#include "vgpu_gr_gp10b.h"
-
-#include <nvgpu/hw/gp10b/hw_gr_gp10b.h>
-
-void vgpu_gr_gp10b_free_gr_ctx(struct gk20a *g, struct vm_gk20a *vm,
-		struct gr_ctx_desc *gr_ctx)
-{
-	struct tegra_vgpu_cmd_msg msg = {0};
-	struct tegra_vgpu_gr_ctx_params *p = &msg.params.gr_ctx;
-	int err;
-
-	gk20a_dbg_fn("");
-
-	if (!gr_ctx || !gr_ctx->mem.gpu_va)
-		return;
-
-	msg.cmd = TEGRA_VGPU_CMD_GR_CTX_FREE;
-	msg.handle = vgpu_get_handle(g);
-	p->gr_ctx_handle = gr_ctx->virt_ctx;
-	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
-	WARN_ON(err || msg.ret);
-
-	__nvgpu_vm_free_va(vm, gr_ctx->mem.gpu_va, gmmu_page_size_kernel);
-
-	nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.pagepool_ctxsw_buffer);
-	nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.betacb_ctxsw_buffer);
-	nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.spill_ctxsw_buffer);
-	nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.preempt_ctxsw_buffer);
-
-	nvgpu_kfree(g, gr_ctx);
-}
-
-int vgpu_gr_gp10b_alloc_gr_ctx(struct gk20a *g,
-			struct gr_ctx_desc **__gr_ctx,
-			struct vm_gk20a *vm,
-			u32 class,
-			u32 flags)
-{
-	struct gr_ctx_desc *gr_ctx;
-	u32 graphics_preempt_mode = 0;
-	u32 compute_preempt_mode = 0;
-	struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
-	int err;
-
-	gk20a_dbg_fn("");
-
-	err = vgpu_gr_alloc_gr_ctx(g, __gr_ctx, vm, class, flags);
-	if (err)
-		return err;
-
-	gr_ctx = *__gr_ctx;
-
-	if (flags & NVGPU_OBJ_CTX_FLAGS_SUPPORT_GFXP)
-		graphics_preempt_mode = NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP;
-	if (flags & NVGPU_OBJ_CTX_FLAGS_SUPPORT_CILP)
-		compute_preempt_mode = NVGPU_PREEMPTION_MODE_COMPUTE_CILP;
-
-	if (priv->constants.force_preempt_mode && !graphics_preempt_mode &&
-		!compute_preempt_mode) {
-		graphics_preempt_mode = g->ops.gr.is_valid_gfx_class(g, class) ?
-					NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP : 0;
-		compute_preempt_mode =
-			g->ops.gr.is_valid_compute_class(g, class) ?
-			NVGPU_PREEMPTION_MODE_COMPUTE_CTA : 0;
-	}
-
-	if (graphics_preempt_mode || compute_preempt_mode) {
-		if (g->ops.gr.set_ctxsw_preemption_mode) {
-			err = g->ops.gr.set_ctxsw_preemption_mode(g, gr_ctx, vm,
-				class, graphics_preempt_mode, compute_preempt_mode);
-			if (err) {
-				nvgpu_err(g,
-					"set_ctxsw_preemption_mode failed");
-				goto fail;
-			}
-		} else {
-			err = -ENOSYS;
-			goto fail;
-		}
-	}
-
-	gk20a_dbg_fn("done");
-	return err;
-
-fail:
-	vgpu_gr_gp10b_free_gr_ctx(g, vm, gr_ctx);
-	return err;
-}
-
-int vgpu_gr_gp10b_set_ctxsw_preemption_mode(struct gk20a *g,
-				struct gr_ctx_desc *gr_ctx,
-				struct vm_gk20a *vm, u32 class,
-				u32 graphics_preempt_mode,
-				u32 compute_preempt_mode)
-{
-	struct tegra_vgpu_cmd_msg msg = {};
-	struct tegra_vgpu_gr_bind_ctxsw_buffers_params *p =
-				&msg.params.gr_bind_ctxsw_buffers;
-	int err = 0;
-
-	if (g->ops.gr.is_valid_gfx_class(g, class) &&
-			g->gr.t18x.ctx_vars.force_preemption_gfxp)
-		graphics_preempt_mode = NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP;
-
-	if (g->ops.gr.is_valid_compute_class(g, class) &&
-			g->gr.t18x.ctx_vars.force_preemption_cilp)
-		compute_preempt_mode = NVGPU_PREEMPTION_MODE_COMPUTE_CILP;
-
-	/* check for invalid combinations */
-	if ((graphics_preempt_mode == 0) && (compute_preempt_mode == 0))
-		return -EINVAL;
-
-	if ((graphics_preempt_mode == NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP) &&
-		(compute_preempt_mode == NVGPU_PREEMPTION_MODE_COMPUTE_CILP))
-		return -EINVAL;
-
-	/* set preemption modes */
-	switch (graphics_preempt_mode) {
-	case NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP:
-	{
-		u32 spill_size =
-			gr_gpc0_swdx_rm_spill_buffer_size_256b_default_v() *
-			gr_gpc0_swdx_rm_spill_buffer_size_256b_byte_granularity_v();
-		u32 pagepool_size = g->ops.gr.pagepool_default_size(g) *
-			gr_scc_pagepool_total_pages_byte_granularity_v();
-		u32 betacb_size = g->gr.attrib_cb_default_size +
-			(gr_gpc0_ppc0_cbm_beta_cb_size_v_gfxp_v() -
-			 gr_gpc0_ppc0_cbm_beta_cb_size_v_default_v());
-		u32 attrib_cb_size = (betacb_size + g->gr.alpha_cb_size) *
-			gr_gpc0_ppc0_cbm_beta_cb_size_v_granularity_v() *
-			g->gr.max_tpc_count;
-		struct nvgpu_mem *desc;
-
-		attrib_cb_size = ALIGN(attrib_cb_size, 128);
-
-		gk20a_dbg_info("gfxp context preempt size=%d",
-			g->gr.t18x.ctx_vars.preempt_image_size);
-		gk20a_dbg_info("gfxp context spill size=%d", spill_size);
-		gk20a_dbg_info("gfxp context pagepool size=%d", pagepool_size);
-		gk20a_dbg_info("gfxp context attrib cb size=%d",
-			attrib_cb_size);
-
-		err = gr_gp10b_alloc_buffer(vm,
-				g->gr.t18x.ctx_vars.preempt_image_size,
-				&gr_ctx->t18x.preempt_ctxsw_buffer);
-		if (err) {
-			err = -ENOMEM;
-			goto fail;
-		}
-		desc = &gr_ctx->t18x.preempt_ctxsw_buffer;
-		p->gpu_va[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_MAIN] = desc->gpu_va;
-		p->size[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_MAIN] = desc->size;
-
-		err = gr_gp10b_alloc_buffer(vm,
-				spill_size,
-				&gr_ctx->t18x.spill_ctxsw_buffer);
-		if (err) {
-			err = -ENOMEM;
-			goto fail;
-		}
-		desc = &gr_ctx->t18x.spill_ctxsw_buffer;
-		p->gpu_va[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_SPILL] = desc->gpu_va;
-		p->size[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_SPILL] = desc->size;
-
-		err = gr_gp10b_alloc_buffer(vm,
-				pagepool_size,
-				&gr_ctx->t18x.pagepool_ctxsw_buffer);
-		if (err) {
-			err = -ENOMEM;
-			goto fail;
-		}
-		desc = &gr_ctx->t18x.pagepool_ctxsw_buffer;
-		p->gpu_va[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_PAGEPOOL] =
-			desc->gpu_va;
-		p->size[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_PAGEPOOL] = desc->size;
-
-		err = gr_gp10b_alloc_buffer(vm,
-				attrib_cb_size,
-				&gr_ctx->t18x.betacb_ctxsw_buffer);
-		if (err) {
-			err = -ENOMEM;
-			goto fail;
-		}
-		desc = &gr_ctx->t18x.betacb_ctxsw_buffer;
-		p->gpu_va[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_BETACB] =
-			desc->gpu_va;
-		p->size[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_BETACB] = desc->size;
-
-		gr_ctx->graphics_preempt_mode = NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP;
-		p->mode = TEGRA_VGPU_GR_CTXSW_PREEMPTION_MODE_GFX_GFXP;
-		break;
-	}
-	case NVGPU_PREEMPTION_MODE_GRAPHICS_WFI:
-		gr_ctx->graphics_preempt_mode = graphics_preempt_mode;
-		break;
-
-	default:
-		break;
-	}
-
-	if (g->ops.gr.is_valid_compute_class(g, class)) {
-		switch (compute_preempt_mode) {
-		case NVGPU_PREEMPTION_MODE_COMPUTE_WFI:
-			gr_ctx->compute_preempt_mode =
-				NVGPU_PREEMPTION_MODE_COMPUTE_WFI;
-			p->mode = TEGRA_VGPU_GR_CTXSW_PREEMPTION_MODE_WFI;
-			break;
-		case NVGPU_PREEMPTION_MODE_COMPUTE_CTA:
-			gr_ctx->compute_preempt_mode =
-				NVGPU_PREEMPTION_MODE_COMPUTE_CTA;
-			p->mode =
-				TEGRA_VGPU_GR_CTXSW_PREEMPTION_MODE_COMPUTE_CTA;
-			break;
-		case NVGPU_PREEMPTION_MODE_COMPUTE_CILP:
-			gr_ctx->compute_preempt_mode =
-				NVGPU_PREEMPTION_MODE_COMPUTE_CILP;
-			p->mode =
-				TEGRA_VGPU_GR_CTXSW_PREEMPTION_MODE_COMPUTE_CILP;
-			break;
-		default:
-			break;
-		}
-	}
-
-	if (gr_ctx->graphics_preempt_mode || gr_ctx->compute_preempt_mode) {
-		msg.cmd = TEGRA_VGPU_CMD_CHANNEL_BIND_GR_CTXSW_BUFFERS;
-		msg.handle = vgpu_get_handle(g);
-		p->gr_ctx_handle = gr_ctx->virt_ctx;
-		err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
-		if (err || msg.ret) {
-			err = -ENOMEM;
-			goto fail;
-		}
-	}
-
-	return err;
-
-fail:
-	nvgpu_err(g, "%s failed %d", __func__, err);
-	return err;
-}
-
-int vgpu_gr_gp10b_set_preemption_mode(struct channel_gk20a *ch,
-				u32 graphics_preempt_mode,
-				u32 compute_preempt_mode)
-{
-	struct gr_ctx_desc *gr_ctx = ch->ch_ctx.gr_ctx;
-	struct gk20a *g = ch->g;
-	struct tsg_gk20a *tsg;
-	struct vm_gk20a *vm;
-	u32 class;
-	int err;
-
-	class = ch->obj_class;
-	if (!class)
-		return -EINVAL;
-
-	/* skip setting anything if both modes are already set */
-	if (graphics_preempt_mode &&
-	    (graphics_preempt_mode == gr_ctx->graphics_preempt_mode))
-		graphics_preempt_mode = 0;
-
-	if (compute_preempt_mode &&
-	    (compute_preempt_mode == gr_ctx->compute_preempt_mode))
-		compute_preempt_mode = 0;
-
-	if (graphics_preempt_mode == 0 && compute_preempt_mode == 0)
-		return 0;
-
-	if (gk20a_is_channel_marked_as_tsg(ch)) {
-		tsg = &g->fifo.tsg[ch->tsgid];
-		vm = tsg->vm;
-	} else {
-		vm = ch->vm;
-	}
-
-	if (g->ops.gr.set_ctxsw_preemption_mode) {
-		err = g->ops.gr.set_ctxsw_preemption_mode(g, gr_ctx, vm, class,
-						graphics_preempt_mode,
-						compute_preempt_mode);
-		if (err) {
-			nvgpu_err(g, "set_ctxsw_preemption_mode failed");
-			return err;
-		}
-	} else {
-		err = -ENOSYS;
-	}
-
-	return err;
-}
-
-int vgpu_gr_gp10b_init_ctx_state(struct gk20a *g)
-{
-	struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
-	int err;
-
-	gk20a_dbg_fn("");
-
-	err = vgpu_gr_init_ctx_state(g);
-	if (err)
-		return err;
-
-	g->gr.t18x.ctx_vars.preempt_image_size =
-			priv->constants.preempt_ctx_size;
-	if (!g->gr.t18x.ctx_vars.preempt_image_size)
-		return -EINVAL;
-
-	return 0;
-}
diff --git a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.h b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.h
deleted file mode 100644
index baf5a8e9..00000000
--- a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef __VGPU_GR_GP10B_H__
-#define __VGPU_GR_GP10B_H__
-
-#include "gk20a/gk20a.h"
-
-void vgpu_gr_gp10b_free_gr_ctx(struct gk20a *g, struct vm_gk20a *vm,
-		struct gr_ctx_desc *gr_ctx);
-int vgpu_gr_gp10b_alloc_gr_ctx(struct gk20a *g,
-			struct gr_ctx_desc **__gr_ctx,
-			struct vm_gk20a *vm,
-			u32 class,
-			u32 flags);
-int vgpu_gr_gp10b_set_ctxsw_preemption_mode(struct gk20a *g,
-			struct gr_ctx_desc *gr_ctx,
-			struct vm_gk20a *vm, u32 class,
-			u32 graphics_preempt_mode,
-			u32 compute_preempt_mode);
-int vgpu_gr_gp10b_set_preemption_mode(struct channel_gk20a *ch,
-			u32 graphics_preempt_mode,
-			u32 compute_preempt_mode);
-int vgpu_gr_gp10b_init_ctx_state(struct gk20a *g);
-
-#endif
diff --git a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_hal_gp10b.c b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_hal_gp10b.c
deleted file mode 100644
index 55448f3b..00000000
--- a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_hal_gp10b.c
+++ /dev/null
@@ -1,630 +0,0 @@
-/*
- * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include "vgpu/vgpu.h"
-#include "vgpu/fifo_vgpu.h"
-#include "vgpu/gr_vgpu.h"
-#include "vgpu/ltc_vgpu.h"
-#include "vgpu/mm_vgpu.h"
-#include "vgpu/dbg_vgpu.h"
-#include "vgpu/fecs_trace_vgpu.h"
-#include "vgpu/css_vgpu.h"
-#include "gp10b/gp10b.h"
-#include "gp10b/hal_gp10b.h"
-#include "vgpu/gm20b/vgpu_gr_gm20b.h"
-#include "vgpu_gr_gp10b.h"
-#include "vgpu_mm_gp10b.h"
-
-#include "gk20a/bus_gk20a.h"
-#include "gk20a/pramin_gk20a.h"
-#include "gk20a/flcn_gk20a.h"
-#include "gk20a/mc_gk20a.h"
-#include "gk20a/fb_gk20a.h"
-
-#include "gp10b/mc_gp10b.h"
-#include "gp10b/ltc_gp10b.h"
-#include "gp10b/mm_gp10b.h"
-#include "gp10b/ce_gp10b.h"
-#include "gp10b/fb_gp10b.h"
-#include "gp10b/pmu_gp10b.h"
-#include "gp10b/gr_ctx_gp10b.h"
-#include "gp10b/fifo_gp10b.h"
-#include "gp10b/gp10b_gating_reglist.h"
-#include "gp10b/regops_gp10b.h"
-#include "gp10b/therm_gp10b.h"
-#include "gp10b/priv_ring_gp10b.h"
-
-#include "gm20b/ltc_gm20b.h"
-#include "gm20b/gr_gm20b.h"
-#include "gm20b/fifo_gm20b.h"
-#include "gm20b/acr_gm20b.h"
-#include "gm20b/pmu_gm20b.h"
-#include "gm20b/fb_gm20b.h"
-#include "gm20b/mm_gm20b.h"
-
-#include <nvgpu/enabled.h>
-
-#include <nvgpu/hw/gp10b/hw_fuse_gp10b.h>
-#include <nvgpu/hw/gp10b/hw_fifo_gp10b.h>
-#include <nvgpu/hw/gp10b/hw_ram_gp10b.h>
-#include <nvgpu/hw/gp10b/hw_top_gp10b.h>
-#include <nvgpu/hw/gp10b/hw_pram_gp10b.h>
-#include <nvgpu/hw/gp10b/hw_pwr_gp10b.h>
-
-static const struct gpu_ops vgpu_gp10b_ops = {
-	.ltc = {
-		.determine_L2_size_bytes = vgpu_determine_L2_size_bytes,
-		.set_zbc_color_entry = gm20b_ltc_set_zbc_color_entry,
-		.set_zbc_depth_entry = gm20b_ltc_set_zbc_depth_entry,
-		.init_cbc = gm20b_ltc_init_cbc,
-		.init_fs_state = vgpu_ltc_init_fs_state,
-		.init_comptags = vgpu_ltc_init_comptags,
-		.cbc_ctrl = NULL,
-		.isr = gp10b_ltc_isr,
-		.cbc_fix_config = gm20b_ltc_cbc_fix_config,
-		.flush = gm20b_flush_ltc,
-		.set_enabled = gp10b_ltc_set_enabled,
-	},
-	.ce2 = {
-		.isr_stall = gp10b_ce_isr,
-		.isr_nonstall = gp10b_ce_nonstall_isr,
-		.get_num_pce = vgpu_ce_get_num_pce,
-	},
-	.gr = {
-		.get_patch_slots = gr_gk20a_get_patch_slots,
-		.init_gpc_mmu = gr_gm20b_init_gpc_mmu,
-		.bundle_cb_defaults = gr_gm20b_bundle_cb_defaults,
-		.cb_size_default = gr_gp10b_cb_size_default,
-		.calc_global_ctx_buffer_size =
-			gr_gp10b_calc_global_ctx_buffer_size,
-		.commit_global_attrib_cb = gr_gp10b_commit_global_attrib_cb,
-		.commit_global_bundle_cb = gr_gp10b_commit_global_bundle_cb,
-		.commit_global_cb_manager = gr_gp10b_commit_global_cb_manager,
-		.commit_global_pagepool = gr_gp10b_commit_global_pagepool,
-		.handle_sw_method = gr_gp10b_handle_sw_method,
-		.set_alpha_circular_buffer_size =
-			gr_gp10b_set_alpha_circular_buffer_size,
-		.set_circular_buffer_size = gr_gp10b_set_circular_buffer_size,
-		.enable_hww_exceptions = gr_gk20a_enable_hww_exceptions,
-		.is_valid_class = gr_gp10b_is_valid_class,
-		.is_valid_gfx_class = gr_gp10b_is_valid_gfx_class,
-		.is_valid_compute_class = gr_gp10b_is_valid_compute_class,
-		.get_sm_dsm_perf_regs = gr_gm20b_get_sm_dsm_perf_regs,
-		.get_sm_dsm_perf_ctrl_regs = gr_gm20b_get_sm_dsm_perf_ctrl_regs,
-		.init_fs_state = vgpu_gm20b_init_fs_state,
-		.set_hww_esr_report_mask = gr_gm20b_set_hww_esr_report_mask,
-		.falcon_load_ucode = gr_gm20b_load_ctxsw_ucode_segments,
-		.load_ctxsw_ucode = gr_gk20a_load_ctxsw_ucode,
-		.set_gpc_tpc_mask = gr_gp10b_set_gpc_tpc_mask,
-		.get_gpc_tpc_mask = vgpu_gr_get_gpc_tpc_mask,
-		.free_channel_ctx = vgpu_gr_free_channel_ctx,
-		.alloc_obj_ctx = vgpu_gr_alloc_obj_ctx,
-		.bind_ctxsw_zcull = vgpu_gr_bind_ctxsw_zcull,
-		.get_zcull_info = vgpu_gr_get_zcull_info,
-		.is_tpc_addr = gr_gm20b_is_tpc_addr,
-		.get_tpc_num = gr_gm20b_get_tpc_num,
-		.detect_sm_arch = vgpu_gr_detect_sm_arch,
-		.add_zbc_color = gr_gp10b_add_zbc_color,
-		.add_zbc_depth = gr_gp10b_add_zbc_depth,
-		.zbc_set_table = vgpu_gr_add_zbc,
-		.zbc_query_table = vgpu_gr_query_zbc,
-		.pmu_save_zbc = gk20a_pmu_save_zbc,
-		.add_zbc = gr_gk20a_add_zbc,
-		.pagepool_default_size = gr_gp10b_pagepool_default_size,
-		.init_ctx_state = vgpu_gr_gp10b_init_ctx_state,
-		.alloc_gr_ctx = vgpu_gr_gp10b_alloc_gr_ctx,
-		.free_gr_ctx = vgpu_gr_gp10b_free_gr_ctx,
-		.update_ctxsw_preemption_mode =
-			gr_gp10b_update_ctxsw_preemption_mode,
-		.dump_gr_regs = NULL,
-		.update_pc_sampling = gr_gm20b_update_pc_sampling,
-		.get_fbp_en_mask = vgpu_gr_get_fbp_en_mask,
-		.get_max_ltc_per_fbp = vgpu_gr_get_max_ltc_per_fbp,
-		.get_max_lts_per_ltc = vgpu_gr_get_max_lts_per_ltc,
-		.get_rop_l2_en_mask = vgpu_gr_rop_l2_en_mask,
-		.get_max_fbps_count = vgpu_gr_get_max_fbps_count,
-		.init_sm_dsm_reg_info = gr_gm20b_init_sm_dsm_reg_info,
-		.wait_empty = gr_gp10b_wait_empty,
-		.init_cyclestats = vgpu_gr_gm20b_init_cyclestats,
-		.set_sm_debug_mode = vgpu_gr_set_sm_debug_mode,
-		.enable_cde_in_fecs = gr_gm20b_enable_cde_in_fecs,
-		.bpt_reg_info = gr_gm20b_bpt_reg_info,
-		.get_access_map = gr_gp10b_get_access_map,
-		.handle_fecs_error = gr_gp10b_handle_fecs_error,
-		.handle_sm_exception = gr_gp10b_handle_sm_exception,
-		.handle_tex_exception = gr_gp10b_handle_tex_exception,
-		.enable_gpc_exceptions = gk20a_gr_enable_gpc_exceptions,
-		.enable_exceptions = gk20a_gr_enable_exceptions,
-		.get_lrf_tex_ltc_dram_override = get_ecc_override_val,
-		.update_smpc_ctxsw_mode = vgpu_gr_update_smpc_ctxsw_mode,
-		.update_hwpm_ctxsw_mode = vgpu_gr_update_hwpm_ctxsw_mode,
-		.record_sm_error_state = gm20b_gr_record_sm_error_state,
-		.update_sm_error_state = gm20b_gr_update_sm_error_state,
-		.clear_sm_error_state = vgpu_gr_clear_sm_error_state,
-		.suspend_contexts = vgpu_gr_suspend_contexts,
-		.resume_contexts = vgpu_gr_resume_contexts,
-		.get_preemption_mode_flags = gr_gp10b_get_preemption_mode_flags,
-		.init_sm_id_table = gr_gk20a_init_sm_id_table,
-		.load_smid_config = gr_gp10b_load_smid_config,
-		.program_sm_id_numbering = gr_gm20b_program_sm_id_numbering,
-		.is_ltcs_ltss_addr = gr_gm20b_is_ltcs_ltss_addr,
-		.is_ltcn_ltss_addr = gr_gm20b_is_ltcn_ltss_addr,
-		.split_lts_broadcast_addr = gr_gm20b_split_lts_broadcast_addr,
-		.split_ltc_broadcast_addr = gr_gm20b_split_ltc_broadcast_addr,
-		.setup_rop_mapping = gr_gk20a_setup_rop_mapping,
-		.program_zcull_mapping = gr_gk20a_program_zcull_mapping,
-		.commit_global_timeslice = gr_gk20a_commit_global_timeslice,
-		.commit_inst = vgpu_gr_commit_inst,
-		.write_zcull_ptr = gr_gk20a_write_zcull_ptr,
-		.write_pm_ptr = gr_gk20a_write_pm_ptr,
-		.init_elcg_mode = gr_gk20a_init_elcg_mode,
-		.load_tpc_mask = gr_gm20b_load_tpc_mask,
-		.inval_icache = gr_gk20a_inval_icache,
-		.trigger_suspend = gr_gk20a_trigger_suspend,
-		.wait_for_pause = gr_gk20a_wait_for_pause,
-		.resume_from_pause = gr_gk20a_resume_from_pause,
-		.clear_sm_errors = gr_gk20a_clear_sm_errors,
-		.tpc_enabled_exceptions = gr_gk20a_tpc_enabled_exceptions,
-		.get_esr_sm_sel = gk20a_gr_get_esr_sm_sel,
-		.sm_debugger_attached = gk20a_gr_sm_debugger_attached,
-		.suspend_single_sm = gk20a_gr_suspend_single_sm,
-		.suspend_all_sms = gk20a_gr_suspend_all_sms,
-		.resume_single_sm = gk20a_gr_resume_single_sm,
-		.resume_all_sms = gk20a_gr_resume_all_sms,
-		.get_sm_hww_warp_esr = gp10b_gr_get_sm_hww_warp_esr,
-		.get_sm_hww_global_esr = gk20a_gr_get_sm_hww_global_esr,
-		.get_sm_no_lock_down_hww_global_esr_mask =
-			gk20a_gr_get_sm_no_lock_down_hww_global_esr_mask,
-		.lock_down_sm = gk20a_gr_lock_down_sm,
-		.wait_for_sm_lock_down = gk20a_gr_wait_for_sm_lock_down,
-		.clear_sm_hww = gm20b_gr_clear_sm_hww,
-		.init_ovr_sm_dsm_perf = gk20a_gr_init_ovr_sm_dsm_perf,
-		.get_ovr_perf_regs = gk20a_gr_get_ovr_perf_regs,
-		.disable_rd_coalesce = gm20a_gr_disable_rd_coalesce,
-		.set_boosted_ctx = NULL,
-		.set_preemption_mode = vgpu_gr_gp10b_set_preemption_mode,
-		.set_czf_bypass = gr_gp10b_set_czf_bypass,
-		.init_czf_bypass = gr_gp10b_init_czf_bypass,
-		.pre_process_sm_exception = gr_gp10b_pre_process_sm_exception,
-		.set_preemption_buffer_va = gr_gp10b_set_preemption_buffer_va,
-		.init_preemption_state = gr_gp10b_init_preemption_state,
-		.update_boosted_ctx = NULL,
-		.set_bes_crop_debug3 = gr_gp10b_set_bes_crop_debug3,
-		.create_gr_sysfs = gr_gp10b_create_sysfs,
-		.set_ctxsw_preemption_mode =
-			vgpu_gr_gp10b_set_ctxsw_preemption_mode,
-		.init_ctxsw_hdr_data = gr_gp10b_init_ctxsw_hdr_data,
-	},
-	.fb = {
-		.reset = fb_gk20a_reset,
-		.init_hw = gk20a_fb_init_hw,
-		.init_fs_state = fb_gm20b_init_fs_state,
-		.set_mmu_page_size = gm20b_fb_set_mmu_page_size,
-		.set_use_full_comp_tag_line =
-			gm20b_fb_set_use_full_comp_tag_line,
-		.compression_page_size = gp10b_fb_compression_page_size,
-		.compressible_page_size = gp10b_fb_compressible_page_size,
-		.vpr_info_fetch = gm20b_fb_vpr_info_fetch,
-		.dump_vpr_wpr_info = gm20b_fb_dump_vpr_wpr_info,
-		.read_wpr_info = gm20b_fb_read_wpr_info,
-		.is_debug_mode_enabled = NULL,
-		.set_debug_mode = vgpu_mm_mmu_set_debug_mode,
-		.tlb_invalidate = vgpu_mm_tlb_invalidate,
-	},
-	.clock_gating = {
-		.slcg_bus_load_gating_prod =
-			gp10b_slcg_bus_load_gating_prod,
-		.slcg_ce2_load_gating_prod =
-			gp10b_slcg_ce2_load_gating_prod,
-		.slcg_chiplet_load_gating_prod =
-			gp10b_slcg_chiplet_load_gating_prod,
-		.slcg_ctxsw_firmware_load_gating_prod =
-			gp10b_slcg_ctxsw_firmware_load_gating_prod,
-		.slcg_fb_load_gating_prod =
-			gp10b_slcg_fb_load_gating_prod,
-		.slcg_fifo_load_gating_prod =
-			gp10b_slcg_fifo_load_gating_prod,
-		.slcg_gr_load_gating_prod =
-			gr_gp10b_slcg_gr_load_gating_prod,
-		.slcg_ltc_load_gating_prod =
-			ltc_gp10b_slcg_ltc_load_gating_prod,
-		.slcg_perf_load_gating_prod =
-			gp10b_slcg_perf_load_gating_prod,
-		.slcg_priring_load_gating_prod =
-			gp10b_slcg_priring_load_gating_prod,
-		.slcg_pmu_load_gating_prod =
-			gp10b_slcg_pmu_load_gating_prod,
-		.slcg_therm_load_gating_prod =
-			gp10b_slcg_therm_load_gating_prod,
-		.slcg_xbar_load_gating_prod =
-			gp10b_slcg_xbar_load_gating_prod,
-		.blcg_bus_load_gating_prod =
-			gp10b_blcg_bus_load_gating_prod,
-		.blcg_ce_load_gating_prod =
-			gp10b_blcg_ce_load_gating_prod,
-		.blcg_ctxsw_firmware_load_gating_prod =
-			gp10b_blcg_ctxsw_firmware_load_gating_prod,
-		.blcg_fb_load_gating_prod =
-			gp10b_blcg_fb_load_gating_prod,
-		.blcg_fifo_load_gating_prod =
-			gp10b_blcg_fifo_load_gating_prod,
-		.blcg_gr_load_gating_prod =
-			gp10b_blcg_gr_load_gating_prod,
-		.blcg_ltc_load_gating_prod =
-			gp10b_blcg_ltc_load_gating_prod,
-		.blcg_pwr_csb_load_gating_prod =
-			gp10b_blcg_pwr_csb_load_gating_prod,
-		.blcg_pmu_load_gating_prod =
-			gp10b_blcg_pmu_load_gating_prod,
-		.blcg_xbar_load_gating_prod =
-			gp10b_blcg_xbar_load_gating_prod,
-		.pg_gr_load_gating_prod =
-			gr_gp10b_pg_gr_load_gating_prod,
-	},
-	.fifo = {
-		.init_fifo_setup_hw = vgpu_init_fifo_setup_hw,
-		.bind_channel = vgpu_channel_bind,
-		.unbind_channel = vgpu_channel_unbind,
-		.disable_channel = vgpu_channel_disable,
-		.enable_channel = vgpu_channel_enable,
-		.alloc_inst = vgpu_channel_alloc_inst,
-		.free_inst = vgpu_channel_free_inst,
-		.setup_ramfc = vgpu_channel_setup_ramfc,
-		.channel_set_timeslice = vgpu_channel_set_timeslice,
-		.default_timeslice_us = vgpu_fifo_default_timeslice_us,
-		.setup_userd = gk20a_fifo_setup_userd,
-		.userd_gp_get = gk20a_fifo_userd_gp_get,
-		.userd_gp_put = gk20a_fifo_userd_gp_put,
-		.userd_pb_get = gk20a_fifo_userd_pb_get,
-		.pbdma_acquire_val = gk20a_fifo_pbdma_acquire_val,
-		.preempt_channel = vgpu_fifo_preempt_channel,
-		.preempt_tsg = vgpu_fifo_preempt_tsg,
-		.enable_tsg = vgpu_enable_tsg,
-		.disable_tsg = gk20a_disable_tsg,
-		.tsg_verify_channel_status = NULL,
-		.tsg_verify_status_ctx_reload = NULL,
-		.reschedule_runlist = NULL,
-		.update_runlist = vgpu_fifo_update_runlist,
-		.trigger_mmu_fault = gm20b_fifo_trigger_mmu_fault,
-		.get_mmu_fault_info = gp10b_fifo_get_mmu_fault_info,
-		.wait_engine_idle = vgpu_fifo_wait_engine_idle,
-		.get_num_fifos = gm20b_fifo_get_num_fifos,
-		.get_pbdma_signature = gp10b_fifo_get_pbdma_signature,
-		.set_runlist_interleave = vgpu_fifo_set_runlist_interleave,
-		.tsg_set_timeslice = vgpu_tsg_set_timeslice,
-		.tsg_open = vgpu_tsg_open,
-		.force_reset_ch = vgpu_fifo_force_reset_ch,
-		.engine_enum_from_type = gp10b_fifo_engine_enum_from_type,
-		.device_info_data_parse = gp10b_device_info_data_parse,
-		.eng_runlist_base_size = fifo_eng_runlist_base__size_1_v,
-		.init_engine_info = vgpu_fifo_init_engine_info,
-		.runlist_entry_size = ram_rl_entry_size_v,
-		.get_tsg_runlist_entry = gk20a_get_tsg_runlist_entry,
-		.get_ch_runlist_entry = gk20a_get_ch_runlist_entry,
-		.is_fault_engine_subid_gpc = gk20a_is_fault_engine_subid_gpc,
-		.dump_pbdma_status = gk20a_dump_pbdma_status,
-		.dump_eng_status = gk20a_dump_eng_status,
-		.dump_channel_status_ramfc = gk20a_dump_channel_status_ramfc,
-		.intr_0_error_mask = gk20a_fifo_intr_0_error_mask,
-		.is_preempt_pending = gk20a_fifo_is_preempt_pending,
-		.init_pbdma_intr_descs = gp10b_fifo_init_pbdma_intr_descs,
-		.reset_enable_hw = gk20a_init_fifo_reset_enable_hw,
-		.teardown_ch_tsg = gk20a_fifo_teardown_ch_tsg,
-		.handle_sched_error = gk20a_fifo_handle_sched_error,
-		.handle_pbdma_intr_0 = gk20a_fifo_handle_pbdma_intr_0,
-		.handle_pbdma_intr_1 = gk20a_fifo_handle_pbdma_intr_1,
-		.tsg_bind_channel = vgpu_tsg_bind_channel,
-		.tsg_unbind_channel = vgpu_tsg_unbind_channel,
-#ifdef CONFIG_TEGRA_GK20A_NVHOST
-		.alloc_syncpt_buf = gk20a_fifo_alloc_syncpt_buf,
-		.free_syncpt_buf = gk20a_fifo_free_syncpt_buf,
-		.add_syncpt_wait_cmd = gk20a_fifo_add_syncpt_wait_cmd,
-		.get_syncpt_wait_cmd_size = gk20a_fifo_get_syncpt_wait_cmd_size,
-		.add_syncpt_incr_cmd = gk20a_fifo_add_syncpt_incr_cmd,
-		.get_syncpt_incr_cmd_size = gk20a_fifo_get_syncpt_incr_cmd_size,
-#endif
-		.resetup_ramfc = NULL,
-		.device_info_fault_id = top_device_info_data_fault_id_enum_v,
-	},
-	.gr_ctx = {
-		.get_netlist_name = gr_gp10b_get_netlist_name,
-		.is_fw_defined = gr_gp10b_is_firmware_defined,
-	},
-#ifdef CONFIG_GK20A_CTXSW_TRACE
-	.fecs_trace = {
-		.alloc_user_buffer = vgpu_alloc_user_buffer,
-		.free_user_buffer = vgpu_free_user_buffer,
-		.mmap_user_buffer = vgpu_mmap_user_buffer,
-		.init = vgpu_fecs_trace_init,
-		.deinit = vgpu_fecs_trace_deinit,
-		.enable = vgpu_fecs_trace_enable,
-		.disable = vgpu_fecs_trace_disable,
-		.is_enabled = vgpu_fecs_trace_is_enabled,
-		.reset = NULL,
-		.flush = NULL,
-		.poll = vgpu_fecs_trace_poll,
-		.bind_channel = NULL,
-		.unbind_channel = NULL,
-		.max_entries = vgpu_fecs_trace_max_entries,
-		.set_filter = vgpu_fecs_trace_set_filter,
-	},
-#endif /* CONFIG_GK20A_CTXSW_TRACE */
-	.mm = {
-		/* FIXME: add support for sparse mappings */
-		.support_sparse = NULL,
-		.gmmu_map = vgpu_gp10b_locked_gmmu_map,
-		.gmmu_unmap = vgpu_locked_gmmu_unmap,
-		.vm_bind_channel = vgpu_vm_bind_channel,
-		.fb_flush = vgpu_mm_fb_flush,
-		.l2_invalidate = vgpu_mm_l2_invalidate,
-		.l2_flush = vgpu_mm_l2_flush,
-		.cbc_clean = gk20a_mm_cbc_clean,
-		.set_big_page_size = gm20b_mm_set_big_page_size,
-		.get_big_page_sizes = gm20b_mm_get_big_page_sizes,
-		.get_default_big_page_size = gp10b_mm_get_default_big_page_size,
-		.gpu_phys_addr = gm20b_gpu_phys_addr,
-		.get_iommu_bit = gk20a_mm_get_iommu_bit,
-		.get_mmu_levels = gp10b_mm_get_mmu_levels,
-		.init_pdb = gp10b_mm_init_pdb,
-		.init_mm_setup_hw = vgpu_gp10b_init_mm_setup_hw,
-		.is_bar1_supported = gm20b_mm_is_bar1_supported,
-		.init_inst_block = gk20a_init_inst_block,
-		.mmu_fault_pending = gk20a_fifo_mmu_fault_pending,
-		.init_bar2_vm = gb10b_init_bar2_vm,
-		.init_bar2_mm_hw_setup = gb10b_init_bar2_mm_hw_setup,
-		.remove_bar2_vm = gp10b_remove_bar2_vm,
-		.get_kind_invalid = gm20b_get_kind_invalid,
-		.get_kind_pitch = gm20b_get_kind_pitch,
-	},
-	.pramin = {
-		.enter = gk20a_pramin_enter,
-		.exit = gk20a_pramin_exit,
-		.data032_r = pram_data032_r,
-	},
-	.therm = {
-		.init_therm_setup_hw = gp10b_init_therm_setup_hw,
-		.elcg_init_idle_filters = gp10b_elcg_init_idle_filters,
-	},
-	.pmu = {
-		.pmu_setup_elpg = gp10b_pmu_setup_elpg,
-		.pmu_get_queue_head = pwr_pmu_queue_head_r,
-		.pmu_get_queue_head_size = pwr_pmu_queue_head__size_1_v,
-		.pmu_get_queue_tail = pwr_pmu_queue_tail_r,
-		.pmu_get_queue_tail_size = pwr_pmu_queue_tail__size_1_v,
-		.pmu_queue_head = gk20a_pmu_queue_head,
-		.pmu_queue_tail = gk20a_pmu_queue_tail,
-		.pmu_msgq_tail = gk20a_pmu_msgq_tail,
-		.pmu_mutex_size = pwr_pmu_mutex__size_1_v,
-		.pmu_mutex_acquire = gk20a_pmu_mutex_acquire,
-		.pmu_mutex_release = gk20a_pmu_mutex_release,
-		.write_dmatrfbase = gp10b_write_dmatrfbase,
-		.pmu_elpg_statistics = gp10b_pmu_elpg_statistics,
-		.pmu_pg_init_param = gp10b_pg_gr_init,
-		.pmu_pg_supported_engines_list = gk20a_pmu_pg_engines_list,
-		.pmu_pg_engines_feature_list = gk20a_pmu_pg_feature_list,
-		.dump_secure_fuses = pmu_dump_security_fuses_gp10b,
-		.reset_engine = gk20a_pmu_engine_reset,
-		.is_engine_in_reset = gk20a_pmu_is_engine_in_reset,
-	},
-	.regops = {
-		.get_global_whitelist_ranges =
-			gp10b_get_global_whitelist_ranges,
-		.get_global_whitelist_ranges_count =
-			gp10b_get_global_whitelist_ranges_count,
-		.get_context_whitelist_ranges =
-			gp10b_get_context_whitelist_ranges,
-		.get_context_whitelist_ranges_count =
-			gp10b_get_context_whitelist_ranges_count,
-		.get_runcontrol_whitelist = gp10b_get_runcontrol_whitelist,
-		.get_runcontrol_whitelist_count =
-			gp10b_get_runcontrol_whitelist_count,
-		.get_runcontrol_whitelist_ranges =
-			gp10b_get_runcontrol_whitelist_ranges,
-		.get_runcontrol_whitelist_ranges_count =
-			gp10b_get_runcontrol_whitelist_ranges_count,
-		.get_qctl_whitelist = gp10b_get_qctl_whitelist,
-		.get_qctl_whitelist_count = gp10b_get_qctl_whitelist_count,
-		.get_qctl_whitelist_ranges = gp10b_get_qctl_whitelist_ranges,
-		.get_qctl_whitelist_ranges_count =
-			gp10b_get_qctl_whitelist_ranges_count,
-		.apply_smpc_war = gp10b_apply_smpc_war,
-	},
-	.mc = {
-		.intr_enable = mc_gp10b_intr_enable,
-		.intr_unit_config = mc_gp10b_intr_unit_config,
-		.isr_stall = mc_gp10b_isr_stall,
-		.intr_stall = mc_gp10b_intr_stall,
-		.intr_stall_pause = mc_gp10b_intr_stall_pause,
-		.intr_stall_resume = mc_gp10b_intr_stall_resume,
-		.intr_nonstall = mc_gp10b_intr_nonstall,
-		.intr_nonstall_pause = mc_gp10b_intr_nonstall_pause,
-		.intr_nonstall_resume = mc_gp10b_intr_nonstall_resume,
-		.enable = gk20a_mc_enable,
-		.disable = gk20a_mc_disable,
-		.reset = gk20a_mc_reset,
-		.boot_0 = gk20a_mc_boot_0,
-		.is_intr1_pending = mc_gp10b_is_intr1_pending,
-	},
-	.debug = {
-		.show_dump = NULL,
-	},
-	.dbg_session_ops = {
-		.exec_reg_ops = vgpu_exec_regops,
-		.dbg_set_powergate = vgpu_dbg_set_powergate,
-		.check_and_set_global_reservation =
-			vgpu_check_and_set_global_reservation,
-		.check_and_set_context_reservation =
-			vgpu_check_and_set_context_reservation,
-		.release_profiler_reservation =
-			vgpu_release_profiler_reservation,
-		.perfbuffer_enable = vgpu_perfbuffer_enable,
-		.perfbuffer_disable = vgpu_perfbuffer_disable,
-	},
-	.bus = {
-		.init_hw = gk20a_bus_init_hw,
-		.isr = gk20a_bus_isr,
-		.read_ptimer = vgpu_read_ptimer,
-		.get_timestamps_zipper = vgpu_get_timestamps_zipper,
-		.bar1_bind = gk20a_bus_bar1_bind,
-	},
-#if defined(CONFIG_GK20A_CYCLE_STATS)
-	.css = {
-		.enable_snapshot = vgpu_css_enable_snapshot_buffer,
-		.disable_snapshot = vgpu_css_release_snapshot_buffer,
-		.check_data_available = vgpu_css_flush_snapshots,
-		.detach_snapshot = vgpu_css_detach,
-		.set_handled_snapshots = NULL,
-		.allocate_perfmon_ids = NULL,
-		.release_perfmon_ids = NULL,
-	},
-#endif
-	.falcon = {
-		.falcon_hal_sw_init = gk20a_falcon_hal_sw_init,
-	},
-	.priv_ring = {
-		.isr = gp10b_priv_ring_isr,
-	},
-	.chip_init_gpu_characteristics = vgpu_init_gpu_characteristics,
-	.get_litter_value = gp10b_get_litter_value,
-};
-
-int vgpu_gp10b_init_hal(struct gk20a *g)
-{
-	struct gpu_ops *gops = &g->ops;
-	u32 val;
-
-	gops->ltc = vgpu_gp10b_ops.ltc;
-	gops->ce2 = vgpu_gp10b_ops.ce2;
-	gops->gr = vgpu_gp10b_ops.gr;
-	gops->fb = vgpu_gp10b_ops.fb;
-	gops->clock_gating = vgpu_gp10b_ops.clock_gating;
-	gops->fifo = vgpu_gp10b_ops.fifo;
-	gops->gr_ctx = vgpu_gp10b_ops.gr_ctx;
-	gops->fecs_trace = vgpu_gp10b_ops.fecs_trace;
-	gops->mm = vgpu_gp10b_ops.mm;
-	gops->pramin = vgpu_gp10b_ops.pramin;
-	gops->therm = vgpu_gp10b_ops.therm;
-	gops->pmu = vgpu_gp10b_ops.pmu;
-	gops->regops = vgpu_gp10b_ops.regops;
-	gops->mc = vgpu_gp10b_ops.mc;
-	gops->debug = vgpu_gp10b_ops.debug;
-	gops->dbg_session_ops = vgpu_gp10b_ops.dbg_session_ops;
-	gops->bus = vgpu_gp10b_ops.bus;
-#if defined(CONFIG_GK20A_CYCLE_STATS)
-	gops->css = vgpu_gp10b_ops.css;
-#endif
-	gops->falcon = vgpu_gp10b_ops.falcon;
-
-	gops->priv_ring = vgpu_gp10b_ops.priv_ring;
-
-	/* Lone Functions */
-	gops->chip_init_gpu_characteristics =
-		vgpu_gp10b_ops.chip_init_gpu_characteristics;
-	gops->get_litter_value = vgpu_gp10b_ops.get_litter_value;
-
-	__nvgpu_set_enabled(g, NVGPU_GR_USE_DMA_FOR_FW_BOOTSTRAP, true);
-	__nvgpu_set_enabled(g, NVGPU_PMU_PSTATE, false);
-
-#ifdef CONFIG_TEGRA_ACR
-	if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) {
-		__nvgpu_set_enabled(g, NVGPU_SEC_PRIVSECURITY, false);
-		__nvgpu_set_enabled(g, NVGPU_SEC_SECUREGPCCS, false);
-	} else if (g->is_virtual) {
-		__nvgpu_set_enabled(g, NVGPU_SEC_PRIVSECURITY, true);
-		__nvgpu_set_enabled(g, NVGPU_SEC_SECUREGPCCS, true);
-	} else {
-		val = gk20a_readl(g, fuse_opt_priv_sec_en_r());
-		if (val) {
-			__nvgpu_set_enabled(g, NVGPU_SEC_PRIVSECURITY, true);
-			__nvgpu_set_enabled(g, NVGPU_SEC_SECUREGPCCS, true);
-		} else {
-			gk20a_dbg_info("priv security is disabled in HW");
-			__nvgpu_set_enabled(g, NVGPU_SEC_PRIVSECURITY, false);
-			__nvgpu_set_enabled(g, NVGPU_SEC_SECUREGPCCS, false);
-		}
-	}
-#else
-	if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) {
-		gk20a_dbg_info("running simulator with PRIV security disabled");
-		__nvgpu_set_enabled(g, NVGPU_SEC_PRIVSECURITY, false);
-		__nvgpu_set_enabled(g, NVGPU_SEC_SECUREGPCCS, false);
-	} else {
-		val = gk20a_readl(g, fuse_opt_priv_sec_en_r());
-		if (val) {
-			gk20a_dbg_info("priv security is not supported but enabled");
-			__nvgpu_set_enabled(g, NVGPU_SEC_PRIVSECURITY, true);
-			__nvgpu_set_enabled(g, NVGPU_SEC_SECUREGPCCS, true);
-			return -EPERM;
-		} else {
-			__nvgpu_set_enabled(g, NVGPU_SEC_PRIVSECURITY, false);
-			__nvgpu_set_enabled(g, NVGPU_SEC_SECUREGPCCS, false);
-		}
-	}
-#endif
-
-	/* priv security dependent ops */
-	if (nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY)) {
-		/* Add in ops from gm20b acr */
-		gops->pmu.is_pmu_supported = gm20b_is_pmu_supported,
-		gops->pmu.prepare_ucode = prepare_ucode_blob,
-		gops->pmu.pmu_setup_hw_and_bootstrap = gm20b_bootstrap_hs_flcn,
-		gops->pmu.is_lazy_bootstrap = gm20b_is_lazy_bootstrap,
-		gops->pmu.is_priv_load = gm20b_is_priv_load,
-		gops->pmu.get_wpr = gm20b_wpr_info,
-		gops->pmu.alloc_blob_space = gm20b_alloc_blob_space,
-		gops->pmu.pmu_populate_loader_cfg =
-			gm20b_pmu_populate_loader_cfg,
-		gops->pmu.flcn_populate_bl_dmem_desc =
-			gm20b_flcn_populate_bl_dmem_desc,
-		gops->pmu.falcon_wait_for_halt = pmu_wait_for_halt,
-		gops->pmu.falcon_clear_halt_interrupt_status =
-			clear_halt_interrupt_status,
-		gops->pmu.init_falcon_setup_hw = gm20b_init_pmu_setup_hw1,
-
-		gops->pmu.init_wpr_region = gm20b_pmu_init_acr;
-		gops->pmu.load_lsfalcon_ucode = gp10b_load_falcon_ucode;
-		gops->pmu.is_lazy_bootstrap = gp10b_is_lazy_bootstrap;
-		gops->pmu.is_priv_load = gp10b_is_priv_load;
-
-		gops->gr.load_ctxsw_ucode = gr_gm20b_load_ctxsw_ucode;
-	} else {
-		/* Inherit from gk20a */
-		gops->pmu.is_pmu_supported = gk20a_is_pmu_supported,
-		gops->pmu.prepare_ucode = nvgpu_pmu_prepare_ns_ucode_blob,
-		gops->pmu.pmu_setup_hw_and_bootstrap = gk20a_init_pmu_setup_hw1,
-		gops->pmu.pmu_nsbootstrap = pmu_bootstrap,
-
-		gops->pmu.load_lsfalcon_ucode = NULL;
-		gops->pmu.init_wpr_region = NULL;
-		gops->pmu.pmu_setup_hw_and_bootstrap = gp10b_init_pmu_setup_hw1;
-
-		gops->gr.load_ctxsw_ucode = gr_gk20a_load_ctxsw_ucode;
-	}
-
-	__nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, false);
-	g->pmu_lsf_pmu_wpr_init_done = 0;
-	g->bootstrap_owner = LSF_BOOTSTRAP_OWNER_DEFAULT;
-
-	g->name = "gp10b";
-
-	return 0;
-}
diff --git a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
deleted file mode 100644
index 5b48cca8..00000000
--- a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
+++ /dev/null
@@ -1,203 +0,0 @@
-/*
- * Virtualized GPU Memory Management
- *
- * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include <uapi/linux/nvgpu.h>
-
-#include "vgpu/vgpu.h"
-#include "vgpu_mm_gp10b.h"
-#include "gk20a/mm_gk20a.h"
-
-#include <nvgpu/bug.h>
-
-int vgpu_gp10b_init_mm_setup_hw(struct gk20a *g)
-{
-	g->mm.bypass_smmu = true;
-	g->mm.disable_bigpage = true;
-	return 0;
-}
-
-static inline int add_mem_desc(struct tegra_vgpu_mem_desc *mem_desc,
-		u64 addr, u64 size, size_t *oob_size)
-{
-	if (*oob_size < sizeof(*mem_desc))
-		return -ENOMEM;
-
-	mem_desc->addr = addr;
-	mem_desc->length = size;
-	*oob_size -= sizeof(*mem_desc);
-	return 0;
-}
-
-u64 vgpu_gp10b_locked_gmmu_map(struct vm_gk20a *vm,
-			u64 map_offset,
-			struct nvgpu_sgt *sgt,
-			u64 buffer_offset,
-			u64 size,
-			int pgsz_idx,
-			u8 kind_v,
-			u32 ctag_offset,
-			u32 flags,
-			int rw_flag,
-			bool clear_ctags,
-			bool sparse,
-			bool priv,
-			struct vm_gk20a_mapping_batch *batch,
-			enum nvgpu_aperture aperture)
-{
-	int err = 0;
-	struct gk20a *g = gk20a_from_vm(vm);
-	struct tegra_vgpu_cmd_msg msg;
-	struct tegra_vgpu_as_map_ex_params *p = &msg.params.as_map_ex;
-	struct tegra_vgpu_mem_desc *mem_desc;
-	u32 page_size = vm->gmmu_page_sizes[pgsz_idx];
-	u64 buffer_size = PAGE_ALIGN(size);
-	u64 space_to_skip = buffer_offset;
-	u32 mem_desc_count = 0, i;
-	void *handle = NULL;
-	size_t oob_size;
-	u8 prot;
-	void *sgl;
-
-	gk20a_dbg_fn("");
-
-	/* FIXME: add support for sparse mappings */
-
-	if (WARN_ON(!sgt) || WARN_ON(!g->mm.bypass_smmu))
-		return 0;
-
-	if (space_to_skip & (page_size - 1))
-		return 0;
-
-	memset(&msg, 0, sizeof(msg));
-
-	/* Allocate (or validate when map_offset != 0) the virtual address. */
-	if (!map_offset) {
-		map_offset = __nvgpu_vm_alloc_va(vm, size, pgsz_idx);
-		if (!map_offset) {
-			nvgpu_err(g, "failed to allocate va space");
-			err = -ENOMEM;
-			goto fail;
-		}
-	}
-
-	handle = tegra_gr_comm_oob_get_ptr(TEGRA_GR_COMM_CTX_CLIENT,
-					tegra_gr_comm_get_server_vmid(),
-					TEGRA_VGPU_QUEUE_CMD,
-					(void **)&mem_desc, &oob_size);
-	if (!handle) {
-		err = -EINVAL;
-		goto fail;
-	}
-	sgl = sgt->sgl;
-	while (sgl) {
-		u64 phys_addr;
-		u64 chunk_length;
-
-		/*
-		 * Cut out sgl ents for space_to_skip.
-		 */
-		if (space_to_skip &&
-		    space_to_skip >= nvgpu_sgt_get_length(sgt, sgl)) {
-			space_to_skip -= nvgpu_sgt_get_length(sgt, sgl);
-			sgl = nvgpu_sgt_get_next(sgt, sgl);
-			continue;
-		}
-
-		phys_addr = nvgpu_sgt_get_phys(sgt, sgl) + space_to_skip;
-		chunk_length = min(size,
-			nvgpu_sgt_get_length(sgt, sgl) - space_to_skip);
-
-		if (add_mem_desc(&mem_desc[mem_desc_count++], phys_addr,
-				chunk_length, &oob_size)) {
-			err = -ENOMEM;
-			goto fail;
-		}
-
-		space_to_skip = 0;
-		size -= chunk_length;
-		sgl = nvgpu_sgt_get_next(sgt, sgl);
-
-		if (size == 0)
-			break;
-	}
-
-	if (rw_flag == gk20a_mem_flag_read_only)
-		prot = TEGRA_VGPU_MAP_PROT_READ_ONLY;
-	else if (rw_flag == gk20a_mem_flag_write_only)
-		prot = TEGRA_VGPU_MAP_PROT_WRITE_ONLY;
-	else
-		prot = TEGRA_VGPU_MAP_PROT_NONE;
-
-	if (pgsz_idx == gmmu_page_size_kernel) {
-		if (page_size == vm->gmmu_page_sizes[gmmu_page_size_small]) {
-			pgsz_idx = gmmu_page_size_small;
-		} else if (page_size ==
-				vm->gmmu_page_sizes[gmmu_page_size_big]) {
-			pgsz_idx = gmmu_page_size_big;
-		} else {
-			nvgpu_err(g, "invalid kernel page size %d",
-				page_size);
-			goto fail;
-		}
-	}
-
-	msg.cmd = TEGRA_VGPU_CMD_AS_MAP_EX;
-	msg.handle = vgpu_get_handle(g);
-	p->handle = vm->handle;
-	p->gpu_va = map_offset;
-	p->size = buffer_size;
-	p->mem_desc_count = mem_desc_count;
-	p->pgsz_idx = pgsz_idx;
-	p->iova = 0;
-	p->kind = kind_v;
-	p->cacheable = (flags & NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE) ? 1 : 0;
-	p->prot = prot;
-	p->ctag_offset = ctag_offset;
-	p->clear_ctags = clear_ctags;
-	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
-	if (err || msg.ret)
-		goto fail;
-
-	/* TLB invalidate handled on server side */
-
-	tegra_gr_comm_oob_put_ptr(handle);
-	return map_offset;
-fail:
-	if (handle)
-		tegra_gr_comm_oob_put_ptr(handle);
-	nvgpu_err(g, "Failed: err=%d, msg.ret=%d", err, msg.ret);
-	nvgpu_err(g,
-		  " Map: %-5s GPU virt %#-12llx +%#-9llx "
-		  "phys offset: %#-4llx; pgsz: %3dkb perm=%-2s | "
-		  "kind=%#02x APT=%-6s",
-		  vm->name, map_offset, buffer_size, buffer_offset,
-		  vm->gmmu_page_sizes[pgsz_idx] >> 10,
-		  nvgpu_gmmu_perm_str(rw_flag),
-		  kind_v, "SYSMEM");
-	for (i = 0; i < mem_desc_count; i++)
-		nvgpu_err(g, "  > 0x%010llx + 0x%llx",
-			  mem_desc[i].addr, mem_desc[i].length);
-
-	return 0;
-}
diff --git a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.h b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.h
deleted file mode 100644
index fd6760ff..00000000
--- a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef __VGPU_MM_GP10B_H__
-#define __VGPU_MM_GP10B_H__
-
-#include "gk20a/gk20a.h"
-
-u64 vgpu_gp10b_locked_gmmu_map(struct vm_gk20a *vm,
-			u64 map_offset,
-			struct nvgpu_sgt *sgt,
-			u64 buffer_offset,
-			u64 size,
-			int pgsz_idx,
-			u8 kind_v,
-			u32 ctag_offset,
-			u32 flags,
-			int rw_flag,
-			bool clear_ctags,
-			bool sparse,
-			bool priv,
-			struct vm_gk20a_mapping_batch *batch,
-			enum nvgpu_aperture aperture);
-int vgpu_gp10b_init_mm_setup_hw(struct gk20a *g);
-
-#endif