author    Richard Zhao <rizhao@nvidia.com>    2018-01-30 02:24:37 -0500
committer mobile promotions <svcmobile_promotions@nvidia.com>    2018-02-27 17:30:52 -0500
commit    6393eddfa996fba03464f897b85aa5ec79860fed (patch)
tree      557ebe9be93e2b0464118e7d8ec019d9d5dbae5f /drivers/gpu/nvgpu/common/linux/vgpu/gp10b
parent    7932568b7fe9e16b2b83bc58b2b3686c0d5e52d4 (diff)
gpu: nvgpu: vgpu: move common files out of linux folder
Most of the files have been moved out of the linux folder. More code can become common as HAL-ification goes on.

Jira EVLR-2364
Change-Id: Ia9dbdbc82f45ceefe5c788eac7517000cd455d5e
Signed-off-by: Richard Zhao <rizhao@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1649947
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
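For readers outside the nvgpu tree: the HAL referred to above is a per-chip table of function pointers that init code copies into the device's live ops, so common (OS-independent) code can call through g->ops without knowing which chip, or whether a virtualized GPU, sits underneath. Below is a minimal, self-contained sketch of just that pattern; every name in it (struct gpu, the demo_* identifiers) is a hypothetical stand-in, not the real nvgpu API. The actual gp10b vGPU table appears in the deleted vgpu_hal_gp10b.c further down this diff.

	#include <stdio.h>

	struct gpu;				/* opaque device handle (illustrative) */

	struct fifo_ops {			/* one "unit" of the HAL */
		int (*preempt_channel)(struct gpu *g, unsigned int chid);
	};

	struct gpu_ops {
		struct fifo_ops fifo;
	};

	struct gpu {
		struct gpu_ops ops;		/* live ops used by common code */
	};

	static int demo_preempt_channel(struct gpu *g, unsigned int chid)
	{
		(void)g;
		printf("preempting channel %u\n", chid);
		return 0;
	}

	/* Per-chip const table, mirroring the shape of vgpu_gp10b_ops below. */
	static const struct gpu_ops demo_chip_ops = {
		.fifo = { .preempt_channel = demo_preempt_channel },
	};

	/* Init copies the table unit by unit, as vgpu_gp10b_init_hal() does. */
	static int demo_init_hal(struct gpu *g)
	{
		g->ops.fifo = demo_chip_ops.fifo;
		return 0;
	}

	int main(void)
	{
		struct gpu g;

		demo_init_hal(&g);
		return g.ops.fifo.preempt_channel(&g, 3);
	}

Keeping each chip's table const and assembling g->ops at init time is what makes it possible to move files like these out of the linux folder: the callers only ever see the function-pointer interface.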
Diffstat (limited to 'drivers/gpu/nvgpu/common/linux/vgpu/gp10b')
-rw-r--r--  drivers/gpu/nvgpu/common/linux/vgpu/gp10b/vgpu_fifo_gp10b.c    24
-rw-r--r--  drivers/gpu/nvgpu/common/linux/vgpu/gp10b/vgpu_fuse_gp10b.c    38
-rw-r--r--  drivers/gpu/nvgpu/common/linux/vgpu/gp10b/vgpu_fuse_gp10b.h    30
-rw-r--r--  drivers/gpu/nvgpu/common/linux/vgpu/gp10b/vgpu_gr_gp10b.c     302
-rw-r--r--  drivers/gpu/nvgpu/common/linux/vgpu/gp10b/vgpu_gr_gp10b.h      37
-rw-r--r--  drivers/gpu/nvgpu/common/linux/vgpu/gp10b/vgpu_hal_gp10b.c    607
-rw-r--r--  drivers/gpu/nvgpu/common/linux/vgpu/gp10b/vgpu_mm_gp10b.c     200
-rw-r--r--  drivers/gpu/nvgpu/common/linux/vgpu/gp10b/vgpu_mm_gp10b.h      39
8 files changed, 0 insertions, 1277 deletions
diff --git a/drivers/gpu/nvgpu/common/linux/vgpu/gp10b/vgpu_fifo_gp10b.c b/drivers/gpu/nvgpu/common/linux/vgpu/gp10b/vgpu_fifo_gp10b.c
deleted file mode 100644
index cc006f76..00000000
--- a/drivers/gpu/nvgpu/common/linux/vgpu/gp10b/vgpu_fifo_gp10b.c
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "vgpu_fifo_gp10b.h"
-
-void vgpu_gp10b_init_fifo_ops(struct gpu_ops *gops)
-{
-	/* syncpoint protection not supported yet */
-	gops->fifo.resetup_ramfc = NULL;
-	gops->fifo.reschedule_runlist = NULL;
-}
diff --git a/drivers/gpu/nvgpu/common/linux/vgpu/gp10b/vgpu_fuse_gp10b.c b/drivers/gpu/nvgpu/common/linux/vgpu/gp10b/vgpu_fuse_gp10b.c
deleted file mode 100644
index 5ee5d1f6..00000000
--- a/drivers/gpu/nvgpu/common/linux/vgpu/gp10b/vgpu_fuse_gp10b.c
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include <nvgpu/enabled.h>
-
-#include "gk20a/gk20a.h"
-
-int vgpu_gp10b_fuse_check_priv_security(struct gk20a *g)
-{
-	if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) {
-		__nvgpu_set_enabled(g, NVGPU_SEC_PRIVSECURITY, false);
-		__nvgpu_set_enabled(g, NVGPU_SEC_SECUREGPCCS, false);
-	} else {
-		__nvgpu_set_enabled(g, NVGPU_SEC_PRIVSECURITY, true);
-		__nvgpu_set_enabled(g, NVGPU_SEC_SECUREGPCCS, true);
-	}
-
-	return 0;
-}
diff --git a/drivers/gpu/nvgpu/common/linux/vgpu/gp10b/vgpu_fuse_gp10b.h b/drivers/gpu/nvgpu/common/linux/vgpu/gp10b/vgpu_fuse_gp10b.h
deleted file mode 100644
index 2ec8f284..00000000
--- a/drivers/gpu/nvgpu/common/linux/vgpu/gp10b/vgpu_fuse_gp10b.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef _VGPU_GP10B_FUSE
-#define _VGPU_GP10B_FUSE
-
-struct gk20a;
-
-int vgpu_gp10b_fuse_check_priv_security(struct gk20a *g);
-
-#endif
diff --git a/drivers/gpu/nvgpu/common/linux/vgpu/gp10b/vgpu_gr_gp10b.c b/drivers/gpu/nvgpu/common/linux/vgpu/gp10b/vgpu_gr_gp10b.c
deleted file mode 100644
index 9adf20d1..00000000
--- a/drivers/gpu/nvgpu/common/linux/vgpu/gp10b/vgpu_gr_gp10b.c
+++ /dev/null
@@ -1,302 +0,0 @@
-/*
- * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <nvgpu/kmem.h>
-#include <nvgpu/dma.h>
-#include <nvgpu/bug.h>
-
-#include "common/linux/vgpu/vgpu.h"
-#include "common/linux/vgpu/gm20b/vgpu_gr_gm20b.h"
-
-#include "gp10b/gr_gp10b.h"
-#include "vgpu_gr_gp10b.h"
-
-#include <nvgpu/hw/gp10b/hw_gr_gp10b.h>
-
-int vgpu_gr_gp10b_alloc_gr_ctx(struct gk20a *g,
-				struct nvgpu_gr_ctx *gr_ctx,
-				struct vm_gk20a *vm,
-				u32 class,
-				u32 flags)
-{
-	u32 graphics_preempt_mode = 0;
-	u32 compute_preempt_mode = 0;
-	struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
-	int err;
-
-	gk20a_dbg_fn("");
-
-	err = vgpu_gr_alloc_gr_ctx(g, gr_ctx, vm, class, flags);
-	if (err)
-		return err;
-
-	if (flags & NVGPU_OBJ_CTX_FLAGS_SUPPORT_GFXP)
-		graphics_preempt_mode = NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP;
-	if (flags & NVGPU_OBJ_CTX_FLAGS_SUPPORT_CILP)
-		compute_preempt_mode = NVGPU_PREEMPTION_MODE_COMPUTE_CILP;
-
-	if (priv->constants.force_preempt_mode && !graphics_preempt_mode &&
-		!compute_preempt_mode) {
-		graphics_preempt_mode = g->ops.gr.is_valid_gfx_class(g, class) ?
-					NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP : 0;
-		compute_preempt_mode =
-			g->ops.gr.is_valid_compute_class(g, class) ?
-			NVGPU_PREEMPTION_MODE_COMPUTE_CTA : 0;
-	}
-
-	if (graphics_preempt_mode || compute_preempt_mode) {
-		if (g->ops.gr.set_ctxsw_preemption_mode) {
-			err = g->ops.gr.set_ctxsw_preemption_mode(g, gr_ctx, vm,
-				class, graphics_preempt_mode, compute_preempt_mode);
-			if (err) {
-				nvgpu_err(g,
-					"set_ctxsw_preemption_mode failed");
-				goto fail;
-			}
-		} else {
-			err = -ENOSYS;
-			goto fail;
-		}
-	}
-
-	gk20a_dbg_fn("done");
-	return err;
-
-fail:
-	vgpu_gr_free_gr_ctx(g, vm, gr_ctx);
-	return err;
-}
-
-int vgpu_gr_gp10b_set_ctxsw_preemption_mode(struct gk20a *g,
-				struct nvgpu_gr_ctx *gr_ctx,
-				struct vm_gk20a *vm, u32 class,
-				u32 graphics_preempt_mode,
-				u32 compute_preempt_mode)
-{
-	struct tegra_vgpu_cmd_msg msg = {};
-	struct tegra_vgpu_gr_bind_ctxsw_buffers_params *p =
-				&msg.params.gr_bind_ctxsw_buffers;
-	int err = 0;
-
-	if (g->ops.gr.is_valid_gfx_class(g, class) &&
-			g->gr.ctx_vars.force_preemption_gfxp)
-		graphics_preempt_mode = NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP;
-
-	if (g->ops.gr.is_valid_compute_class(g, class) &&
-			g->gr.ctx_vars.force_preemption_cilp)
-		compute_preempt_mode = NVGPU_PREEMPTION_MODE_COMPUTE_CILP;
-
-	/* check for invalid combinations */
-	if ((graphics_preempt_mode == 0) && (compute_preempt_mode == 0))
-		return -EINVAL;
-
-	if ((graphics_preempt_mode == NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP) &&
-		(compute_preempt_mode == NVGPU_PREEMPTION_MODE_COMPUTE_CILP))
-		return -EINVAL;
-
-	/* set preemption modes */
-	switch (graphics_preempt_mode) {
-	case NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP:
-	{
-		u32 spill_size =
-			gr_gpc0_swdx_rm_spill_buffer_size_256b_default_v() *
-			gr_gpc0_swdx_rm_spill_buffer_size_256b_byte_granularity_v();
-		u32 pagepool_size = g->ops.gr.pagepool_default_size(g) *
-			gr_scc_pagepool_total_pages_byte_granularity_v();
-		u32 betacb_size = g->gr.attrib_cb_default_size +
-			(gr_gpc0_ppc0_cbm_beta_cb_size_v_gfxp_v() -
-			 gr_gpc0_ppc0_cbm_beta_cb_size_v_default_v());
-		u32 attrib_cb_size = (betacb_size + g->gr.alpha_cb_size) *
-			gr_gpc0_ppc0_cbm_beta_cb_size_v_granularity_v() *
-			g->gr.max_tpc_count;
-		struct nvgpu_mem *desc;
-
-		attrib_cb_size = ALIGN(attrib_cb_size, 128);
-
-		gk20a_dbg_info("gfxp context preempt size=%d",
-			g->gr.ctx_vars.preempt_image_size);
-		gk20a_dbg_info("gfxp context spill size=%d", spill_size);
-		gk20a_dbg_info("gfxp context pagepool size=%d", pagepool_size);
-		gk20a_dbg_info("gfxp context attrib cb size=%d",
-			attrib_cb_size);
-
-		err = gr_gp10b_alloc_buffer(vm,
-					g->gr.ctx_vars.preempt_image_size,
-					&gr_ctx->preempt_ctxsw_buffer);
-		if (err) {
-			err = -ENOMEM;
-			goto fail;
-		}
-		desc = &gr_ctx->preempt_ctxsw_buffer;
-		p->gpu_va[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_MAIN] = desc->gpu_va;
-		p->size[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_MAIN] = desc->size;
-
-		err = gr_gp10b_alloc_buffer(vm,
-					spill_size,
-					&gr_ctx->spill_ctxsw_buffer);
-		if (err) {
-			err = -ENOMEM;
-			goto fail;
-		}
-		desc = &gr_ctx->spill_ctxsw_buffer;
-		p->gpu_va[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_SPILL] = desc->gpu_va;
-		p->size[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_SPILL] = desc->size;
-
-		err = gr_gp10b_alloc_buffer(vm,
-					pagepool_size,
-					&gr_ctx->pagepool_ctxsw_buffer);
-		if (err) {
-			err = -ENOMEM;
-			goto fail;
-		}
-		desc = &gr_ctx->pagepool_ctxsw_buffer;
-		p->gpu_va[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_PAGEPOOL] =
-			desc->gpu_va;
-		p->size[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_PAGEPOOL] = desc->size;
-
-		err = gr_gp10b_alloc_buffer(vm,
-					attrib_cb_size,
-					&gr_ctx->betacb_ctxsw_buffer);
-		if (err) {
-			err = -ENOMEM;
-			goto fail;
-		}
-		desc = &gr_ctx->betacb_ctxsw_buffer;
-		p->gpu_va[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_BETACB] =
-			desc->gpu_va;
-		p->size[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_BETACB] = desc->size;
-
-		gr_ctx->graphics_preempt_mode = NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP;
-		p->mode = TEGRA_VGPU_GR_CTXSW_PREEMPTION_MODE_GFX_GFXP;
-		break;
-	}
-	case NVGPU_PREEMPTION_MODE_GRAPHICS_WFI:
-		gr_ctx->graphics_preempt_mode = graphics_preempt_mode;
-		break;
-
-	default:
-		break;
-	}
-
-	if (g->ops.gr.is_valid_compute_class(g, class)) {
-		switch (compute_preempt_mode) {
-		case NVGPU_PREEMPTION_MODE_COMPUTE_WFI:
-			gr_ctx->compute_preempt_mode =
-				NVGPU_PREEMPTION_MODE_COMPUTE_WFI;
-			p->mode = TEGRA_VGPU_GR_CTXSW_PREEMPTION_MODE_WFI;
-			break;
-		case NVGPU_PREEMPTION_MODE_COMPUTE_CTA:
-			gr_ctx->compute_preempt_mode =
-				NVGPU_PREEMPTION_MODE_COMPUTE_CTA;
-			p->mode =
-				TEGRA_VGPU_GR_CTXSW_PREEMPTION_MODE_COMPUTE_CTA;
-			break;
-		case NVGPU_PREEMPTION_MODE_COMPUTE_CILP:
-			gr_ctx->compute_preempt_mode =
-				NVGPU_PREEMPTION_MODE_COMPUTE_CILP;
-			p->mode =
-				TEGRA_VGPU_GR_CTXSW_PREEMPTION_MODE_COMPUTE_CILP;
-			break;
-		default:
-			break;
-		}
-	}
-
-	if (gr_ctx->graphics_preempt_mode || gr_ctx->compute_preempt_mode) {
-		msg.cmd = TEGRA_VGPU_CMD_CHANNEL_BIND_GR_CTXSW_BUFFERS;
-		msg.handle = vgpu_get_handle(g);
-		p->gr_ctx_handle = gr_ctx->virt_ctx;
-		err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
-		if (err || msg.ret) {
-			err = -ENOMEM;
-			goto fail;
-		}
-	}
-
-	return err;
-
-fail:
-	nvgpu_err(g, "%s failed %d", __func__, err);
-	return err;
-}
-
-int vgpu_gr_gp10b_set_preemption_mode(struct channel_gk20a *ch,
-				u32 graphics_preempt_mode,
-				u32 compute_preempt_mode)
-{
-	struct nvgpu_gr_ctx *gr_ctx;
-	struct gk20a *g = ch->g;
-	struct tsg_gk20a *tsg;
-	struct vm_gk20a *vm;
-	u32 class;
-	int err;
-
-	class = ch->obj_class;
-	if (!class)
-		return -EINVAL;
-
-	tsg = tsg_gk20a_from_ch(ch);
-	if (!tsg)
-		return -EINVAL;
-
-	vm = tsg->vm;
-	gr_ctx = &tsg->gr_ctx;
-
-	/* skip setting anything if both modes are already set */
-	if (graphics_preempt_mode &&
-		(graphics_preempt_mode == gr_ctx->graphics_preempt_mode))
-		graphics_preempt_mode = 0;
-
-	if (compute_preempt_mode &&
-		(compute_preempt_mode == gr_ctx->compute_preempt_mode))
-		compute_preempt_mode = 0;
-
-	if (graphics_preempt_mode == 0 && compute_preempt_mode == 0)
-		return 0;
-
-	if (g->ops.gr.set_ctxsw_preemption_mode) {
-		err = g->ops.gr.set_ctxsw_preemption_mode(g, gr_ctx, vm, class,
-						graphics_preempt_mode,
-						compute_preempt_mode);
-		if (err) {
-			nvgpu_err(g, "set_ctxsw_preemption_mode failed");
-			return err;
-		}
-	} else {
-		err = -ENOSYS;
-	}
-
-	return err;
-}
-
-int vgpu_gr_gp10b_init_ctx_state(struct gk20a *g)
-{
-	struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
-	int err;
-
-	gk20a_dbg_fn("");
-
-	err = vgpu_gr_init_ctx_state(g);
-	if (err)
-		return err;
-
-	g->gr.ctx_vars.preempt_image_size =
-			priv->constants.preempt_ctx_size;
-	if (!g->gr.ctx_vars.preempt_image_size)
-		return -EINVAL;
-
-	return 0;
-}
diff --git a/drivers/gpu/nvgpu/common/linux/vgpu/gp10b/vgpu_gr_gp10b.h b/drivers/gpu/nvgpu/common/linux/vgpu/gp10b/vgpu_gr_gp10b.h
deleted file mode 100644
index 559bd227..00000000
--- a/drivers/gpu/nvgpu/common/linux/vgpu/gp10b/vgpu_gr_gp10b.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __VGPU_GR_GP10B_H__
-#define __VGPU_GR_GP10B_H__
-
-#include "gk20a/gk20a.h"
-
-int vgpu_gr_gp10b_alloc_gr_ctx(struct gk20a *g,
-				struct nvgpu_gr_ctx *gr_ctx,
-				struct vm_gk20a *vm,
-				u32 class,
-				u32 flags);
-int vgpu_gr_gp10b_set_ctxsw_preemption_mode(struct gk20a *g,
-				struct nvgpu_gr_ctx *gr_ctx,
-				struct vm_gk20a *vm, u32 class,
-				u32 graphics_preempt_mode,
-				u32 compute_preempt_mode);
-int vgpu_gr_gp10b_set_preemption_mode(struct channel_gk20a *ch,
-				u32 graphics_preempt_mode,
-				u32 compute_preempt_mode);
-int vgpu_gr_gp10b_init_ctx_state(struct gk20a *g);
-
-#endif
diff --git a/drivers/gpu/nvgpu/common/linux/vgpu/gp10b/vgpu_hal_gp10b.c b/drivers/gpu/nvgpu/common/linux/vgpu/gp10b/vgpu_hal_gp10b.c
deleted file mode 100644
index 39b92263..00000000
--- a/drivers/gpu/nvgpu/common/linux/vgpu/gp10b/vgpu_hal_gp10b.c
+++ /dev/null
@@ -1,607 +0,0 @@
-/*
- * Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "common/linux/vgpu/vgpu.h"
-#include "common/linux/vgpu/fifo_vgpu.h"
-#include "common/linux/vgpu/gr_vgpu.h"
-#include "common/linux/vgpu/ltc_vgpu.h"
-#include "common/linux/vgpu/mm_vgpu.h"
-#include "common/linux/vgpu/dbg_vgpu.h"
-#include "common/linux/vgpu/fecs_trace_vgpu.h"
-#include "common/linux/vgpu/css_vgpu.h"
-#include "gp10b/gp10b.h"
-#include "gp10b/hal_gp10b.h"
-#include "common/linux/vgpu/gm20b/vgpu_gr_gm20b.h"
-#include "vgpu_gr_gp10b.h"
-#include "vgpu_mm_gp10b.h"
-#include "vgpu_fuse_gp10b.h"
-
-#include "gk20a/bus_gk20a.h"
-#include "gk20a/pramin_gk20a.h"
-#include "gk20a/flcn_gk20a.h"
-#include "gk20a/mc_gk20a.h"
-#include "gk20a/fb_gk20a.h"
-
-#include "gp10b/mc_gp10b.h"
-#include "gp10b/ltc_gp10b.h"
-#include "gp10b/mm_gp10b.h"
-#include "gp10b/ce_gp10b.h"
-#include "gp10b/fb_gp10b.h"
-#include "gp10b/pmu_gp10b.h"
-#include "gp10b/gr_gp10b.h"
-#include "gp10b/gr_ctx_gp10b.h"
-#include "gp10b/fifo_gp10b.h"
-#include "gp10b/gp10b_gating_reglist.h"
-#include "gp10b/regops_gp10b.h"
-#include "gp10b/therm_gp10b.h"
-#include "gp10b/priv_ring_gp10b.h"
-
-#include "gm20b/ltc_gm20b.h"
-#include "gm20b/gr_gm20b.h"
-#include "gm20b/fifo_gm20b.h"
-#include "gm20b/acr_gm20b.h"
-#include "gm20b/pmu_gm20b.h"
-#include "gm20b/fb_gm20b.h"
-#include "gm20b/mm_gm20b.h"
-
-#include <nvgpu/enabled.h>
-
-#include <nvgpu/hw/gp10b/hw_fuse_gp10b.h>
-#include <nvgpu/hw/gp10b/hw_fifo_gp10b.h>
-#include <nvgpu/hw/gp10b/hw_ram_gp10b.h>
-#include <nvgpu/hw/gp10b/hw_top_gp10b.h>
-#include <nvgpu/hw/gp10b/hw_pram_gp10b.h>
-#include <nvgpu/hw/gp10b/hw_pwr_gp10b.h>
-
-static const struct gpu_ops vgpu_gp10b_ops = {
-	.ltc = {
-		.determine_L2_size_bytes = vgpu_determine_L2_size_bytes,
-		.set_zbc_color_entry = gm20b_ltc_set_zbc_color_entry,
-		.set_zbc_depth_entry = gm20b_ltc_set_zbc_depth_entry,
-		.init_cbc = gm20b_ltc_init_cbc,
-		.init_fs_state = vgpu_ltc_init_fs_state,
-		.init_comptags = vgpu_ltc_init_comptags,
-		.cbc_ctrl = NULL,
-		.isr = gp10b_ltc_isr,
-		.cbc_fix_config = gm20b_ltc_cbc_fix_config,
-		.flush = gm20b_flush_ltc,
-		.set_enabled = gp10b_ltc_set_enabled,
-	},
-	.ce2 = {
-		.isr_stall = gp10b_ce_isr,
-		.isr_nonstall = gp10b_ce_nonstall_isr,
-		.get_num_pce = vgpu_ce_get_num_pce,
-	},
-	.gr = {
-		.get_patch_slots = gr_gk20a_get_patch_slots,
-		.init_gpc_mmu = gr_gm20b_init_gpc_mmu,
-		.bundle_cb_defaults = gr_gm20b_bundle_cb_defaults,
-		.cb_size_default = gr_gp10b_cb_size_default,
-		.calc_global_ctx_buffer_size =
-			gr_gp10b_calc_global_ctx_buffer_size,
-		.commit_global_attrib_cb = gr_gp10b_commit_global_attrib_cb,
-		.commit_global_bundle_cb = gr_gp10b_commit_global_bundle_cb,
-		.commit_global_cb_manager = gr_gp10b_commit_global_cb_manager,
-		.commit_global_pagepool = gr_gp10b_commit_global_pagepool,
-		.handle_sw_method = gr_gp10b_handle_sw_method,
-		.set_alpha_circular_buffer_size =
-			gr_gp10b_set_alpha_circular_buffer_size,
-		.set_circular_buffer_size = gr_gp10b_set_circular_buffer_size,
-		.enable_hww_exceptions = gr_gk20a_enable_hww_exceptions,
-		.is_valid_class = gr_gp10b_is_valid_class,
-		.is_valid_gfx_class = gr_gp10b_is_valid_gfx_class,
-		.is_valid_compute_class = gr_gp10b_is_valid_compute_class,
-		.get_sm_dsm_perf_regs = gr_gm20b_get_sm_dsm_perf_regs,
-		.get_sm_dsm_perf_ctrl_regs = gr_gm20b_get_sm_dsm_perf_ctrl_regs,
-		.init_fs_state = vgpu_gr_init_fs_state,
-		.set_hww_esr_report_mask = gr_gm20b_set_hww_esr_report_mask,
-		.falcon_load_ucode = gr_gm20b_load_ctxsw_ucode_segments,
-		.load_ctxsw_ucode = gr_gk20a_load_ctxsw_ucode,
-		.set_gpc_tpc_mask = gr_gp10b_set_gpc_tpc_mask,
-		.get_gpc_tpc_mask = vgpu_gr_get_gpc_tpc_mask,
-		.alloc_obj_ctx = vgpu_gr_alloc_obj_ctx,
-		.bind_ctxsw_zcull = vgpu_gr_bind_ctxsw_zcull,
-		.get_zcull_info = vgpu_gr_get_zcull_info,
-		.is_tpc_addr = gr_gm20b_is_tpc_addr,
-		.get_tpc_num = gr_gm20b_get_tpc_num,
-		.detect_sm_arch = vgpu_gr_detect_sm_arch,
-		.add_zbc_color = gr_gp10b_add_zbc_color,
-		.add_zbc_depth = gr_gp10b_add_zbc_depth,
-		.zbc_set_table = vgpu_gr_add_zbc,
-		.zbc_query_table = vgpu_gr_query_zbc,
-		.pmu_save_zbc = gk20a_pmu_save_zbc,
-		.add_zbc = gr_gk20a_add_zbc,
-		.pagepool_default_size = gr_gp10b_pagepool_default_size,
-		.init_ctx_state = vgpu_gr_gp10b_init_ctx_state,
-		.alloc_gr_ctx = vgpu_gr_gp10b_alloc_gr_ctx,
-		.free_gr_ctx = vgpu_gr_free_gr_ctx,
-		.update_ctxsw_preemption_mode =
-			gr_gp10b_update_ctxsw_preemption_mode,
-		.dump_gr_regs = NULL,
-		.update_pc_sampling = gr_gm20b_update_pc_sampling,
-		.get_fbp_en_mask = vgpu_gr_get_fbp_en_mask,
-		.get_max_ltc_per_fbp = vgpu_gr_get_max_ltc_per_fbp,
-		.get_max_lts_per_ltc = vgpu_gr_get_max_lts_per_ltc,
-		.get_rop_l2_en_mask = vgpu_gr_rop_l2_en_mask,
-		.get_max_fbps_count = vgpu_gr_get_max_fbps_count,
-		.init_sm_dsm_reg_info = gr_gm20b_init_sm_dsm_reg_info,
-		.wait_empty = gr_gp10b_wait_empty,
-		.init_cyclestats = vgpu_gr_gm20b_init_cyclestats,
-		.set_sm_debug_mode = vgpu_gr_set_sm_debug_mode,
-		.enable_cde_in_fecs = gr_gm20b_enable_cde_in_fecs,
-		.bpt_reg_info = gr_gm20b_bpt_reg_info,
-		.get_access_map = gr_gp10b_get_access_map,
-		.handle_fecs_error = gr_gp10b_handle_fecs_error,
-		.handle_sm_exception = gr_gp10b_handle_sm_exception,
-		.handle_tex_exception = gr_gp10b_handle_tex_exception,
-		.enable_gpc_exceptions = gk20a_gr_enable_gpc_exceptions,
-		.enable_exceptions = gk20a_gr_enable_exceptions,
-		.get_lrf_tex_ltc_dram_override = get_ecc_override_val,
-		.update_smpc_ctxsw_mode = vgpu_gr_update_smpc_ctxsw_mode,
-		.update_hwpm_ctxsw_mode = vgpu_gr_update_hwpm_ctxsw_mode,
-		.record_sm_error_state = gm20b_gr_record_sm_error_state,
-		.update_sm_error_state = gm20b_gr_update_sm_error_state,
-		.clear_sm_error_state = vgpu_gr_clear_sm_error_state,
-		.suspend_contexts = vgpu_gr_suspend_contexts,
-		.resume_contexts = vgpu_gr_resume_contexts,
-		.get_preemption_mode_flags = gr_gp10b_get_preemption_mode_flags,
-		.init_sm_id_table = vgpu_gr_init_sm_id_table,
-		.load_smid_config = gr_gp10b_load_smid_config,
-		.program_sm_id_numbering = gr_gm20b_program_sm_id_numbering,
-		.is_ltcs_ltss_addr = gr_gm20b_is_ltcs_ltss_addr,
-		.is_ltcn_ltss_addr = gr_gm20b_is_ltcn_ltss_addr,
-		.split_lts_broadcast_addr = gr_gm20b_split_lts_broadcast_addr,
-		.split_ltc_broadcast_addr = gr_gm20b_split_ltc_broadcast_addr,
-		.setup_rop_mapping = gr_gk20a_setup_rop_mapping,
-		.program_zcull_mapping = gr_gk20a_program_zcull_mapping,
-		.commit_global_timeslice = gr_gk20a_commit_global_timeslice,
-		.commit_inst = vgpu_gr_commit_inst,
-		.write_zcull_ptr = gr_gk20a_write_zcull_ptr,
-		.write_pm_ptr = gr_gk20a_write_pm_ptr,
-		.init_elcg_mode = gr_gk20a_init_elcg_mode,
-		.load_tpc_mask = gr_gm20b_load_tpc_mask,
-		.inval_icache = gr_gk20a_inval_icache,
-		.trigger_suspend = gr_gk20a_trigger_suspend,
-		.wait_for_pause = gr_gk20a_wait_for_pause,
-		.resume_from_pause = gr_gk20a_resume_from_pause,
-		.clear_sm_errors = gr_gk20a_clear_sm_errors,
-		.tpc_enabled_exceptions = gr_gk20a_tpc_enabled_exceptions,
-		.get_esr_sm_sel = gk20a_gr_get_esr_sm_sel,
-		.sm_debugger_attached = gk20a_gr_sm_debugger_attached,
-		.suspend_single_sm = gk20a_gr_suspend_single_sm,
-		.suspend_all_sms = gk20a_gr_suspend_all_sms,
-		.resume_single_sm = gk20a_gr_resume_single_sm,
-		.resume_all_sms = gk20a_gr_resume_all_sms,
-		.get_sm_hww_warp_esr = gp10b_gr_get_sm_hww_warp_esr,
-		.get_sm_hww_global_esr = gk20a_gr_get_sm_hww_global_esr,
-		.get_sm_no_lock_down_hww_global_esr_mask =
-			gk20a_gr_get_sm_no_lock_down_hww_global_esr_mask,
-		.lock_down_sm = gk20a_gr_lock_down_sm,
-		.wait_for_sm_lock_down = gk20a_gr_wait_for_sm_lock_down,
-		.clear_sm_hww = gm20b_gr_clear_sm_hww,
-		.init_ovr_sm_dsm_perf = gk20a_gr_init_ovr_sm_dsm_perf,
-		.get_ovr_perf_regs = gk20a_gr_get_ovr_perf_regs,
-		.disable_rd_coalesce = gm20a_gr_disable_rd_coalesce,
-		.set_boosted_ctx = NULL,
-		.set_preemption_mode = vgpu_gr_gp10b_set_preemption_mode,
-		.set_czf_bypass = gr_gp10b_set_czf_bypass,
-		.init_czf_bypass = gr_gp10b_init_czf_bypass,
-		.pre_process_sm_exception = gr_gp10b_pre_process_sm_exception,
-		.set_preemption_buffer_va = gr_gp10b_set_preemption_buffer_va,
-		.init_preemption_state = gr_gp10b_init_preemption_state,
-		.update_boosted_ctx = NULL,
-		.set_bes_crop_debug3 = gr_gp10b_set_bes_crop_debug3,
-		.set_bes_crop_debug4 = gr_gp10b_set_bes_crop_debug4,
-		.create_gr_sysfs = gr_gp10b_create_sysfs,
-		.set_ctxsw_preemption_mode =
-			vgpu_gr_gp10b_set_ctxsw_preemption_mode,
-		.init_ctxsw_hdr_data = gr_gp10b_init_ctxsw_hdr_data,
-		.init_gfxp_wfi_timeout_count =
-			gr_gp10b_init_gfxp_wfi_timeout_count,
-		.get_max_gfxp_wfi_timeout_count =
-			gr_gp10b_get_max_gfxp_wfi_timeout_count,
-	},
-	.fb = {
-		.reset = fb_gk20a_reset,
-		.init_hw = gk20a_fb_init_hw,
-		.init_fs_state = fb_gm20b_init_fs_state,
-		.set_mmu_page_size = gm20b_fb_set_mmu_page_size,
-		.set_use_full_comp_tag_line =
-			gm20b_fb_set_use_full_comp_tag_line,
-		.compression_page_size = gp10b_fb_compression_page_size,
-		.compressible_page_size = gp10b_fb_compressible_page_size,
-		.compression_align_mask = gm20b_fb_compression_align_mask,
-		.vpr_info_fetch = gm20b_fb_vpr_info_fetch,
-		.dump_vpr_wpr_info = gm20b_fb_dump_vpr_wpr_info,
-		.read_wpr_info = gm20b_fb_read_wpr_info,
-		.is_debug_mode_enabled = NULL,
-		.set_debug_mode = vgpu_mm_mmu_set_debug_mode,
-		.tlb_invalidate = vgpu_mm_tlb_invalidate,
-	},
-	.clock_gating = {
-		.slcg_bus_load_gating_prod =
-			gp10b_slcg_bus_load_gating_prod,
-		.slcg_ce2_load_gating_prod =
-			gp10b_slcg_ce2_load_gating_prod,
-		.slcg_chiplet_load_gating_prod =
-			gp10b_slcg_chiplet_load_gating_prod,
-		.slcg_ctxsw_firmware_load_gating_prod =
-			gp10b_slcg_ctxsw_firmware_load_gating_prod,
-		.slcg_fb_load_gating_prod =
-			gp10b_slcg_fb_load_gating_prod,
-		.slcg_fifo_load_gating_prod =
-			gp10b_slcg_fifo_load_gating_prod,
-		.slcg_gr_load_gating_prod =
-			gr_gp10b_slcg_gr_load_gating_prod,
-		.slcg_ltc_load_gating_prod =
-			ltc_gp10b_slcg_ltc_load_gating_prod,
-		.slcg_perf_load_gating_prod =
-			gp10b_slcg_perf_load_gating_prod,
-		.slcg_priring_load_gating_prod =
-			gp10b_slcg_priring_load_gating_prod,
-		.slcg_pmu_load_gating_prod =
-			gp10b_slcg_pmu_load_gating_prod,
-		.slcg_therm_load_gating_prod =
-			gp10b_slcg_therm_load_gating_prod,
-		.slcg_xbar_load_gating_prod =
-			gp10b_slcg_xbar_load_gating_prod,
-		.blcg_bus_load_gating_prod =
-			gp10b_blcg_bus_load_gating_prod,
-		.blcg_ce_load_gating_prod =
-			gp10b_blcg_ce_load_gating_prod,
-		.blcg_ctxsw_firmware_load_gating_prod =
-			gp10b_blcg_ctxsw_firmware_load_gating_prod,
-		.blcg_fb_load_gating_prod =
-			gp10b_blcg_fb_load_gating_prod,
-		.blcg_fifo_load_gating_prod =
-			gp10b_blcg_fifo_load_gating_prod,
-		.blcg_gr_load_gating_prod =
-			gp10b_blcg_gr_load_gating_prod,
-		.blcg_ltc_load_gating_prod =
-			gp10b_blcg_ltc_load_gating_prod,
-		.blcg_pwr_csb_load_gating_prod =
-			gp10b_blcg_pwr_csb_load_gating_prod,
-		.blcg_pmu_load_gating_prod =
-			gp10b_blcg_pmu_load_gating_prod,
-		.blcg_xbar_load_gating_prod =
-			gp10b_blcg_xbar_load_gating_prod,
-		.pg_gr_load_gating_prod =
-			gr_gp10b_pg_gr_load_gating_prod,
-	},
-	.fifo = {
-		.init_fifo_setup_hw = vgpu_init_fifo_setup_hw,
-		.bind_channel = vgpu_channel_bind,
-		.unbind_channel = vgpu_channel_unbind,
-		.disable_channel = vgpu_channel_disable,
-		.enable_channel = vgpu_channel_enable,
-		.alloc_inst = vgpu_channel_alloc_inst,
-		.free_inst = vgpu_channel_free_inst,
-		.setup_ramfc = vgpu_channel_setup_ramfc,
-		.default_timeslice_us = vgpu_fifo_default_timeslice_us,
-		.setup_userd = gk20a_fifo_setup_userd,
-		.userd_gp_get = gk20a_fifo_userd_gp_get,
-		.userd_gp_put = gk20a_fifo_userd_gp_put,
-		.userd_pb_get = gk20a_fifo_userd_pb_get,
-		.pbdma_acquire_val = gk20a_fifo_pbdma_acquire_val,
-		.preempt_channel = vgpu_fifo_preempt_channel,
-		.preempt_tsg = vgpu_fifo_preempt_tsg,
-		.enable_tsg = vgpu_enable_tsg,
-		.disable_tsg = gk20a_disable_tsg,
-		.tsg_verify_channel_status = NULL,
-		.tsg_verify_status_ctx_reload = NULL,
-		.reschedule_runlist = NULL,
-		.update_runlist = vgpu_fifo_update_runlist,
-		.trigger_mmu_fault = gm20b_fifo_trigger_mmu_fault,
-		.get_mmu_fault_info = gp10b_fifo_get_mmu_fault_info,
-		.wait_engine_idle = vgpu_fifo_wait_engine_idle,
-		.get_num_fifos = gm20b_fifo_get_num_fifos,
-		.get_pbdma_signature = gp10b_fifo_get_pbdma_signature,
-		.set_runlist_interleave = vgpu_fifo_set_runlist_interleave,
-		.tsg_set_timeslice = vgpu_tsg_set_timeslice,
-		.tsg_open = vgpu_tsg_open,
-		.force_reset_ch = vgpu_fifo_force_reset_ch,
-		.engine_enum_from_type = gp10b_fifo_engine_enum_from_type,
-		.device_info_data_parse = gp10b_device_info_data_parse,
-		.eng_runlist_base_size = fifo_eng_runlist_base__size_1_v,
-		.init_engine_info = vgpu_fifo_init_engine_info,
-		.runlist_entry_size = ram_rl_entry_size_v,
-		.get_tsg_runlist_entry = gk20a_get_tsg_runlist_entry,
-		.get_ch_runlist_entry = gk20a_get_ch_runlist_entry,
-		.is_fault_engine_subid_gpc = gk20a_is_fault_engine_subid_gpc,
-		.dump_pbdma_status = gk20a_dump_pbdma_status,
-		.dump_eng_status = gk20a_dump_eng_status,
-		.dump_channel_status_ramfc = gk20a_dump_channel_status_ramfc,
-		.intr_0_error_mask = gk20a_fifo_intr_0_error_mask,
-		.is_preempt_pending = gk20a_fifo_is_preempt_pending,
-		.init_pbdma_intr_descs = gp10b_fifo_init_pbdma_intr_descs,
-		.reset_enable_hw = gk20a_init_fifo_reset_enable_hw,
-		.teardown_ch_tsg = gk20a_fifo_teardown_ch_tsg,
-		.handle_sched_error = gk20a_fifo_handle_sched_error,
-		.handle_pbdma_intr_0 = gk20a_fifo_handle_pbdma_intr_0,
-		.handle_pbdma_intr_1 = gk20a_fifo_handle_pbdma_intr_1,
-		.tsg_bind_channel = vgpu_tsg_bind_channel,
-		.tsg_unbind_channel = vgpu_tsg_unbind_channel,
-#ifdef CONFIG_TEGRA_GK20A_NVHOST
-		.alloc_syncpt_buf = gk20a_fifo_alloc_syncpt_buf,
-		.free_syncpt_buf = gk20a_fifo_free_syncpt_buf,
-		.add_syncpt_wait_cmd = gk20a_fifo_add_syncpt_wait_cmd,
-		.get_syncpt_wait_cmd_size = gk20a_fifo_get_syncpt_wait_cmd_size,
-		.add_syncpt_incr_cmd = gk20a_fifo_add_syncpt_incr_cmd,
-		.get_syncpt_incr_cmd_size = gk20a_fifo_get_syncpt_incr_cmd_size,
-		.get_sync_ro_map = NULL,
-#endif
-		.resetup_ramfc = NULL,
-		.device_info_fault_id = top_device_info_data_fault_id_enum_v,
-	},
-	.gr_ctx = {
-		.get_netlist_name = gr_gp10b_get_netlist_name,
-		.is_fw_defined = gr_gp10b_is_firmware_defined,
-	},
-#ifdef CONFIG_GK20A_CTXSW_TRACE
-	.fecs_trace = {
-		.alloc_user_buffer = vgpu_alloc_user_buffer,
-		.free_user_buffer = vgpu_free_user_buffer,
-		.mmap_user_buffer = vgpu_mmap_user_buffer,
-		.init = vgpu_fecs_trace_init,
-		.deinit = vgpu_fecs_trace_deinit,
-		.enable = vgpu_fecs_trace_enable,
-		.disable = vgpu_fecs_trace_disable,
-		.is_enabled = vgpu_fecs_trace_is_enabled,
-		.reset = NULL,
-		.flush = NULL,
-		.poll = vgpu_fecs_trace_poll,
-		.bind_channel = NULL,
-		.unbind_channel = NULL,
-		.max_entries = vgpu_fecs_trace_max_entries,
-		.set_filter = vgpu_fecs_trace_set_filter,
-	},
-#endif /* CONFIG_GK20A_CTXSW_TRACE */
-	.mm = {
-		/* FIXME: add support for sparse mappings */
-		.support_sparse = NULL,
-		.gmmu_map = vgpu_gp10b_locked_gmmu_map,
-		.gmmu_unmap = vgpu_locked_gmmu_unmap,
-		.vm_bind_channel = vgpu_vm_bind_channel,
-		.fb_flush = vgpu_mm_fb_flush,
-		.l2_invalidate = vgpu_mm_l2_invalidate,
-		.l2_flush = vgpu_mm_l2_flush,
-		.cbc_clean = gk20a_mm_cbc_clean,
-		.set_big_page_size = gm20b_mm_set_big_page_size,
-		.get_big_page_sizes = gm20b_mm_get_big_page_sizes,
-		.get_default_big_page_size = gp10b_mm_get_default_big_page_size,
-		.gpu_phys_addr = gm20b_gpu_phys_addr,
-		.get_iommu_bit = gk20a_mm_get_iommu_bit,
-		.get_mmu_levels = gp10b_mm_get_mmu_levels,
-		.init_pdb = gp10b_mm_init_pdb,
-		.init_mm_setup_hw = vgpu_gp10b_init_mm_setup_hw,
-		.is_bar1_supported = gm20b_mm_is_bar1_supported,
-		.init_inst_block = gk20a_init_inst_block,
-		.mmu_fault_pending = gk20a_fifo_mmu_fault_pending,
-		.init_bar2_vm = gp10b_init_bar2_vm,
-		.init_bar2_mm_hw_setup = gp10b_init_bar2_mm_hw_setup,
-		.remove_bar2_vm = gp10b_remove_bar2_vm,
-		.get_kind_invalid = gm20b_get_kind_invalid,
-		.get_kind_pitch = gm20b_get_kind_pitch,
-	},
-	.pramin = {
-		.enter = gk20a_pramin_enter,
-		.exit = gk20a_pramin_exit,
-		.data032_r = pram_data032_r,
-	},
-	.therm = {
-		.init_therm_setup_hw = gp10b_init_therm_setup_hw,
-		.elcg_init_idle_filters = gp10b_elcg_init_idle_filters,
-	},
-	.pmu = {
-		.pmu_setup_elpg = gp10b_pmu_setup_elpg,
-		.pmu_get_queue_head = pwr_pmu_queue_head_r,
-		.pmu_get_queue_head_size = pwr_pmu_queue_head__size_1_v,
-		.pmu_get_queue_tail = pwr_pmu_queue_tail_r,
-		.pmu_get_queue_tail_size = pwr_pmu_queue_tail__size_1_v,
-		.pmu_queue_head = gk20a_pmu_queue_head,
-		.pmu_queue_tail = gk20a_pmu_queue_tail,
-		.pmu_msgq_tail = gk20a_pmu_msgq_tail,
-		.pmu_mutex_size = pwr_pmu_mutex__size_1_v,
-		.pmu_mutex_acquire = gk20a_pmu_mutex_acquire,
-		.pmu_mutex_release = gk20a_pmu_mutex_release,
-		.write_dmatrfbase = gp10b_write_dmatrfbase,
-		.pmu_elpg_statistics = gp10b_pmu_elpg_statistics,
-		.pmu_init_perfmon = nvgpu_pmu_init_perfmon,
-		.pmu_perfmon_start_sampling = nvgpu_pmu_perfmon_start_sampling,
-		.pmu_perfmon_stop_sampling = nvgpu_pmu_perfmon_stop_sampling,
-		.pmu_pg_init_param = gp10b_pg_gr_init,
-		.pmu_pg_supported_engines_list = gk20a_pmu_pg_engines_list,
-		.pmu_pg_engines_feature_list = gk20a_pmu_pg_feature_list,
-		.dump_secure_fuses = pmu_dump_security_fuses_gp10b,
-		.reset_engine = gk20a_pmu_engine_reset,
-		.is_engine_in_reset = gk20a_pmu_is_engine_in_reset,
-	},
-	.regops = {
-		.get_global_whitelist_ranges =
-			gp10b_get_global_whitelist_ranges,
-		.get_global_whitelist_ranges_count =
-			gp10b_get_global_whitelist_ranges_count,
-		.get_context_whitelist_ranges =
-			gp10b_get_context_whitelist_ranges,
-		.get_context_whitelist_ranges_count =
-			gp10b_get_context_whitelist_ranges_count,
-		.get_runcontrol_whitelist = gp10b_get_runcontrol_whitelist,
-		.get_runcontrol_whitelist_count =
-			gp10b_get_runcontrol_whitelist_count,
-		.get_runcontrol_whitelist_ranges =
-			gp10b_get_runcontrol_whitelist_ranges,
-		.get_runcontrol_whitelist_ranges_count =
-			gp10b_get_runcontrol_whitelist_ranges_count,
-		.get_qctl_whitelist = gp10b_get_qctl_whitelist,
-		.get_qctl_whitelist_count = gp10b_get_qctl_whitelist_count,
-		.get_qctl_whitelist_ranges = gp10b_get_qctl_whitelist_ranges,
-		.get_qctl_whitelist_ranges_count =
-			gp10b_get_qctl_whitelist_ranges_count,
-		.apply_smpc_war = gp10b_apply_smpc_war,
-	},
-	.mc = {
-		.intr_enable = mc_gp10b_intr_enable,
-		.intr_unit_config = mc_gp10b_intr_unit_config,
-		.isr_stall = mc_gp10b_isr_stall,
-		.intr_stall = mc_gp10b_intr_stall,
-		.intr_stall_pause = mc_gp10b_intr_stall_pause,
-		.intr_stall_resume = mc_gp10b_intr_stall_resume,
-		.intr_nonstall = mc_gp10b_intr_nonstall,
-		.intr_nonstall_pause = mc_gp10b_intr_nonstall_pause,
-		.intr_nonstall_resume = mc_gp10b_intr_nonstall_resume,
-		.enable = gk20a_mc_enable,
-		.disable = gk20a_mc_disable,
-		.reset = gk20a_mc_reset,
-		.boot_0 = gk20a_mc_boot_0,
-		.is_intr1_pending = mc_gp10b_is_intr1_pending,
-	},
-	.debug = {
-		.show_dump = NULL,
-	},
-	.dbg_session_ops = {
-		.exec_reg_ops = vgpu_exec_regops,
-		.dbg_set_powergate = vgpu_dbg_set_powergate,
-		.check_and_set_global_reservation =
-			vgpu_check_and_set_global_reservation,
-		.check_and_set_context_reservation =
-			vgpu_check_and_set_context_reservation,
-		.release_profiler_reservation =
-			vgpu_release_profiler_reservation,
-		.perfbuffer_enable = vgpu_perfbuffer_enable,
-		.perfbuffer_disable = vgpu_perfbuffer_disable,
-	},
-	.bus = {
-		.init_hw = gk20a_bus_init_hw,
-		.isr = gk20a_bus_isr,
-		.read_ptimer = vgpu_read_ptimer,
-		.get_timestamps_zipper = vgpu_get_timestamps_zipper,
-		.bar1_bind = gk20a_bus_bar1_bind,
-	},
-#if defined(CONFIG_GK20A_CYCLE_STATS)
-	.css = {
-		.enable_snapshot = vgpu_css_enable_snapshot_buffer,
-		.disable_snapshot = vgpu_css_release_snapshot_buffer,
-		.check_data_available = vgpu_css_flush_snapshots,
-		.detach_snapshot = vgpu_css_detach,
-		.set_handled_snapshots = NULL,
-		.allocate_perfmon_ids = NULL,
-		.release_perfmon_ids = NULL,
-	},
-#endif
-	.falcon = {
-		.falcon_hal_sw_init = gk20a_falcon_hal_sw_init,
-	},
-	.priv_ring = {
-		.isr = gp10b_priv_ring_isr,
-	},
-	.fuse = {
-		.check_priv_security = vgpu_gp10b_fuse_check_priv_security,
-	},
-	.chip_init_gpu_characteristics = vgpu_init_gpu_characteristics,
-	.get_litter_value = gp10b_get_litter_value,
-};
-
-int vgpu_gp10b_init_hal(struct gk20a *g)
-{
-	struct gpu_ops *gops = &g->ops;
-
-	gops->ltc = vgpu_gp10b_ops.ltc;
-	gops->ce2 = vgpu_gp10b_ops.ce2;
-	gops->gr = vgpu_gp10b_ops.gr;
-	gops->fb = vgpu_gp10b_ops.fb;
-	gops->clock_gating = vgpu_gp10b_ops.clock_gating;
-	gops->fifo = vgpu_gp10b_ops.fifo;
-	gops->gr_ctx = vgpu_gp10b_ops.gr_ctx;
-#ifdef CONFIG_GK20A_CTXSW_TRACE
-	gops->fecs_trace = vgpu_gp10b_ops.fecs_trace;
-#endif
-	gops->mm = vgpu_gp10b_ops.mm;
-	gops->pramin = vgpu_gp10b_ops.pramin;
-	gops->therm = vgpu_gp10b_ops.therm;
-	gops->pmu = vgpu_gp10b_ops.pmu;
-	gops->regops = vgpu_gp10b_ops.regops;
-	gops->mc = vgpu_gp10b_ops.mc;
-	gops->debug = vgpu_gp10b_ops.debug;
-	gops->dbg_session_ops = vgpu_gp10b_ops.dbg_session_ops;
-	gops->bus = vgpu_gp10b_ops.bus;
-#if defined(CONFIG_GK20A_CYCLE_STATS)
-	gops->css = vgpu_gp10b_ops.css;
-#endif
-	gops->falcon = vgpu_gp10b_ops.falcon;
-
-	gops->priv_ring = vgpu_gp10b_ops.priv_ring;
-
-	gops->fuse = vgpu_gp10b_ops.fuse;
-
-	/* Lone Functions */
-	gops->chip_init_gpu_characteristics =
-		vgpu_gp10b_ops.chip_init_gpu_characteristics;
-	gops->get_litter_value = vgpu_gp10b_ops.get_litter_value;
-
-	__nvgpu_set_enabled(g, NVGPU_GR_USE_DMA_FOR_FW_BOOTSTRAP, true);
-	__nvgpu_set_enabled(g, NVGPU_PMU_PSTATE, false);
-
-	/* Read fuses to check if gpu needs to boot in secure/non-secure mode */
-	if (gops->fuse.check_priv_security(g))
-		return -EINVAL; /* Do not boot gpu */
-
-	/* priv security dependent ops */
-	if (nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY)) {
-		/* Add in ops from gm20b acr */
-		gops->pmu.is_pmu_supported = gm20b_is_pmu_supported,
-		gops->pmu.prepare_ucode = prepare_ucode_blob,
-		gops->pmu.pmu_setup_hw_and_bootstrap = gm20b_bootstrap_hs_flcn,
-		gops->pmu.is_lazy_bootstrap = gm20b_is_lazy_bootstrap,
-		gops->pmu.is_priv_load = gm20b_is_priv_load,
-		gops->pmu.get_wpr = gm20b_wpr_info,
-		gops->pmu.alloc_blob_space = gm20b_alloc_blob_space,
-		gops->pmu.pmu_populate_loader_cfg =
-			gm20b_pmu_populate_loader_cfg,
-		gops->pmu.flcn_populate_bl_dmem_desc =
-			gm20b_flcn_populate_bl_dmem_desc,
-		gops->pmu.falcon_wait_for_halt = pmu_wait_for_halt,
-		gops->pmu.falcon_clear_halt_interrupt_status =
-			clear_halt_interrupt_status,
-		gops->pmu.init_falcon_setup_hw = gm20b_init_pmu_setup_hw1,
-
-		gops->pmu.init_wpr_region = gm20b_pmu_init_acr;
-		gops->pmu.load_lsfalcon_ucode = gp10b_load_falcon_ucode;
-		gops->pmu.is_lazy_bootstrap = gp10b_is_lazy_bootstrap;
-		gops->pmu.is_priv_load = gp10b_is_priv_load;
-
-		gops->gr.load_ctxsw_ucode = gr_gm20b_load_ctxsw_ucode;
-	} else {
-		/* Inherit from gk20a */
-		gops->pmu.is_pmu_supported = gk20a_is_pmu_supported,
-		gops->pmu.prepare_ucode = nvgpu_pmu_prepare_ns_ucode_blob,
-		gops->pmu.pmu_setup_hw_and_bootstrap = gk20a_init_pmu_setup_hw1,
-		gops->pmu.pmu_nsbootstrap = pmu_bootstrap,
-
-		gops->pmu.load_lsfalcon_ucode = NULL;
-		gops->pmu.init_wpr_region = NULL;
-		gops->pmu.pmu_setup_hw_and_bootstrap = gp10b_init_pmu_setup_hw1;
-
-		gops->gr.load_ctxsw_ucode = gr_gk20a_load_ctxsw_ucode;
-	}
-
-	__nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, false);
-	g->pmu_lsf_pmu_wpr_init_done = 0;
-	g->bootstrap_owner = LSF_BOOTSTRAP_OWNER_DEFAULT;
-
-	g->name = "gp10b";
-
-	return 0;
-}
diff --git a/drivers/gpu/nvgpu/common/linux/vgpu/gp10b/vgpu_mm_gp10b.c b/drivers/gpu/nvgpu/common/linux/vgpu/gp10b/vgpu_mm_gp10b.c
deleted file mode 100644
index 26ce891f..00000000
--- a/drivers/gpu/nvgpu/common/linux/vgpu/gp10b/vgpu_mm_gp10b.c
+++ /dev/null
@@ -1,200 +0,0 @@
-/*
- * Virtualized GPU Memory Management
- *
- * Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "common/linux/vgpu/vgpu.h"
-#include "vgpu_mm_gp10b.h"
-#include "gk20a/mm_gk20a.h"
-
-#include <nvgpu/bug.h>
-#include <nvgpu/dma.h>
-#include <nvgpu/vgpu/vgpu_ivc.h>
-
-int vgpu_gp10b_init_mm_setup_hw(struct gk20a *g)
-{
-	g->mm.disable_bigpage = true;
-	return 0;
-}
-
-static inline int add_mem_desc(struct tegra_vgpu_mem_desc *mem_desc,
-		u64 addr, u64 size, size_t *oob_size)
-{
-	if (*oob_size < sizeof(*mem_desc))
-		return -ENOMEM;
-
-	mem_desc->addr = addr;
-	mem_desc->length = size;
-	*oob_size -= sizeof(*mem_desc);
-	return 0;
-}
-
-u64 vgpu_gp10b_locked_gmmu_map(struct vm_gk20a *vm,
-			u64 map_offset,
-			struct nvgpu_sgt *sgt,
-			u64 buffer_offset,
-			u64 size,
-			int pgsz_idx,
-			u8 kind_v,
-			u32 ctag_offset,
-			u32 flags,
-			int rw_flag,
-			bool clear_ctags,
-			bool sparse,
-			bool priv,
-			struct vm_gk20a_mapping_batch *batch,
-			enum nvgpu_aperture aperture)
-{
-	int err = 0;
-	struct gk20a *g = gk20a_from_vm(vm);
-	struct tegra_vgpu_cmd_msg msg;
-	struct tegra_vgpu_as_map_ex_params *p = &msg.params.as_map_ex;
-	struct tegra_vgpu_mem_desc *mem_desc;
-	u32 page_size = vm->gmmu_page_sizes[pgsz_idx];
-	u64 buffer_size = PAGE_ALIGN(size);
-	u64 space_to_skip = buffer_offset;
-	u32 mem_desc_count = 0, i;
-	void *handle = NULL;
-	size_t oob_size;
-	u8 prot;
-	void *sgl;
-
-	gk20a_dbg_fn("");
-
-	/* FIXME: add support for sparse mappings */
-
-	if (WARN_ON(!sgt) || WARN_ON(nvgpu_iommuable(g)))
-		return 0;
-
-	if (space_to_skip & (page_size - 1))
-		return 0;
-
-	memset(&msg, 0, sizeof(msg));
-
-	/* Allocate (or validate when map_offset != 0) the virtual address. */
-	if (!map_offset) {
-		map_offset = __nvgpu_vm_alloc_va(vm, size, pgsz_idx);
-		if (!map_offset) {
-			nvgpu_err(g, "failed to allocate va space");
-			err = -ENOMEM;
-			goto fail;
-		}
-	}
-
-	handle = vgpu_ivc_oob_get_ptr(vgpu_ivc_get_server_vmid(),
-					TEGRA_VGPU_QUEUE_CMD,
-					(void **)&mem_desc, &oob_size);
-	if (!handle) {
-		err = -EINVAL;
-		goto fail;
-	}
-	sgl = sgt->sgl;
-	while (sgl) {
-		u64 phys_addr;
-		u64 chunk_length;
-
-		/*
-		 * Cut out sgl ents for space_to_skip.
-		 */
-		if (space_to_skip &&
-		    space_to_skip >= nvgpu_sgt_get_length(sgt, sgl)) {
-			space_to_skip -= nvgpu_sgt_get_length(sgt, sgl);
-			sgl = nvgpu_sgt_get_next(sgt, sgl);
-			continue;
-		}
-
-		phys_addr = nvgpu_sgt_get_phys(sgt, sgl) + space_to_skip;
-		chunk_length = min(size,
-			nvgpu_sgt_get_length(sgt, sgl) - space_to_skip);
-
-		if (add_mem_desc(&mem_desc[mem_desc_count++], phys_addr,
-				chunk_length, &oob_size)) {
-			err = -ENOMEM;
-			goto fail;
-		}
-
-		space_to_skip = 0;
-		size -= chunk_length;
-		sgl = nvgpu_sgt_get_next(sgt, sgl);
-
-		if (size == 0)
-			break;
-	}
-
-	if (rw_flag == gk20a_mem_flag_read_only)
-		prot = TEGRA_VGPU_MAP_PROT_READ_ONLY;
-	else if (rw_flag == gk20a_mem_flag_write_only)
-		prot = TEGRA_VGPU_MAP_PROT_WRITE_ONLY;
-	else
-		prot = TEGRA_VGPU_MAP_PROT_NONE;
-
-	if (pgsz_idx == gmmu_page_size_kernel) {
-		if (page_size == vm->gmmu_page_sizes[gmmu_page_size_small]) {
-			pgsz_idx = gmmu_page_size_small;
-		} else if (page_size ==
-				vm->gmmu_page_sizes[gmmu_page_size_big]) {
-			pgsz_idx = gmmu_page_size_big;
-		} else {
-			nvgpu_err(g, "invalid kernel page size %d",
-				page_size);
-			goto fail;
-		}
-	}
-
-	msg.cmd = TEGRA_VGPU_CMD_AS_MAP_EX;
-	msg.handle = vgpu_get_handle(g);
-	p->handle = vm->handle;
-	p->gpu_va = map_offset;
-	p->size = buffer_size;
-	p->mem_desc_count = mem_desc_count;
-	p->pgsz_idx = pgsz_idx;
-	p->iova = 0;
-	p->kind = kind_v;
-	if (flags & NVGPU_VM_MAP_CACHEABLE)
-		p->flags = TEGRA_VGPU_MAP_CACHEABLE;
-	if (flags & NVGPU_VM_MAP_IO_COHERENT)
-		p->flags |= TEGRA_VGPU_MAP_IO_COHERENT;
-	if (flags & NVGPU_VM_MAP_L3_ALLOC)
-		p->flags |= TEGRA_VGPU_MAP_L3_ALLOC;
-	p->prot = prot;
-	p->ctag_offset = ctag_offset;
-	p->clear_ctags = clear_ctags;
-	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
-	if (err || msg.ret)
-		goto fail;
-
-	/* TLB invalidate handled on server side */
-
-	vgpu_ivc_oob_put_ptr(handle);
-	return map_offset;
-fail:
-	if (handle)
-		vgpu_ivc_oob_put_ptr(handle);
-	nvgpu_err(g, "Failed: err=%d, msg.ret=%d", err, msg.ret);
-	nvgpu_err(g,
-		  " Map: %-5s GPU virt %#-12llx +%#-9llx "
-		  "phys offset: %#-4llx; pgsz: %3dkb perm=%-2s | "
-		  "kind=%#02x APT=%-6s",
-		  vm->name, map_offset, buffer_size, buffer_offset,
-		  vm->gmmu_page_sizes[pgsz_idx] >> 10,
-		  nvgpu_gmmu_perm_str(rw_flag),
-		  kind_v, "SYSMEM");
-	for (i = 0; i < mem_desc_count; i++)
-		nvgpu_err(g, "  > 0x%010llx + 0x%llx",
-			  mem_desc[i].addr, mem_desc[i].length);
-
-	return 0;
-}
diff --git a/drivers/gpu/nvgpu/common/linux/vgpu/gp10b/vgpu_mm_gp10b.h b/drivers/gpu/nvgpu/common/linux/vgpu/gp10b/vgpu_mm_gp10b.h
deleted file mode 100644
index 0a477dd0..00000000
--- a/drivers/gpu/nvgpu/common/linux/vgpu/gp10b/vgpu_mm_gp10b.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __VGPU_MM_GP10B_H__
-#define __VGPU_MM_GP10B_H__
-
-#include "gk20a/gk20a.h"
-
-u64 vgpu_gp10b_locked_gmmu_map(struct vm_gk20a *vm,
-			u64 map_offset,
-			struct nvgpu_sgt *sgt,
-			u64 buffer_offset,
-			u64 size,
-			int pgsz_idx,
-			u8 kind_v,
-			u32 ctag_offset,
-			u32 flags,
-			int rw_flag,
-			bool clear_ctags,
-			bool sparse,
-			bool priv,
-			struct vm_gk20a_mapping_batch *batch,
-			enum nvgpu_aperture aperture);
-int vgpu_gp10b_init_mm_setup_hw(struct gk20a *g);
-
-#endif