author		Deepak Nibade <dnibade@nvidia.com>	2017-11-14 09:43:28 -0500
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-11-17 11:27:19 -0500
commit		b42fb7ba26b565f93118fbdd9e17b42ee6144c5e (patch)
tree		26e2d919f019d15b51bba4d7b5c938f77ad5cff5 /drivers/gpu/nvgpu/vgpu
parent		b7cc3a2aa6c92a09eed43513287c9062f22ad127 (diff)
gpu: nvgpu: move vgpu code to linux
Most of the VGPU code is Linux-specific but lives in common code. So, until the VGPU code is properly abstracted and made OS-independent, move all of it to the Linux-specific directory.

Handle the corresponding Makefile changes.

Update all #includes to reflect the new paths.

Add the GPL license to the newly added Linux files.

Jira NVGPU-387

Change-Id: Ic133e4c80e570bcc273f0dacf45283fefd678923
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1599472
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
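A minimal sketch of the #include update described above, assuming the moved code lands under common/linux/vgpu/ (the diff below only records the removal from drivers/gpu/nvgpu/vgpu/, so the destination path is an assumption here):

	-#include "vgpu/vgpu.h"
	+#include "common/linux/vgpu/vgpu.h"	/* assumed Linux-specific location of the moved header */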
Diffstat (limited to 'drivers/gpu/nvgpu/vgpu')
-rw-r--r--	drivers/gpu/nvgpu/vgpu/ce2_vgpu.c	51
-rw-r--r--	drivers/gpu/nvgpu/vgpu/clk_vgpu.c	170
-rw-r--r--	drivers/gpu/nvgpu/vgpu/clk_vgpu.h	33
-rw-r--r--	drivers/gpu/nvgpu/vgpu/css_vgpu.c	240
-rw-r--r--	drivers/gpu/nvgpu/vgpu/css_vgpu.h	40
-rw-r--r--	drivers/gpu/nvgpu/vgpu/dbg_vgpu.c	216
-rw-r--r--	drivers/gpu/nvgpu/vgpu/dbg_vgpu.h	47
-rw-r--r--	drivers/gpu/nvgpu/vgpu/fecs_trace_vgpu.c	231
-rw-r--r--	drivers/gpu/nvgpu/vgpu/fecs_trace_vgpu.h	47
-rw-r--r--	drivers/gpu/nvgpu/vgpu/fifo_vgpu.c	828
-rw-r--r--	drivers/gpu/nvgpu/vgpu/fifo_vgpu.h	65
-rw-r--r--	drivers/gpu/nvgpu/vgpu/gm20b/vgpu_gr_gm20b.c	69
-rw-r--r--	drivers/gpu/nvgpu/vgpu/gm20b/vgpu_gr_gm20b.h	31
-rw-r--r--	drivers/gpu/nvgpu/vgpu/gm20b/vgpu_hal_gm20b.c	588
-rw-r--r--	drivers/gpu/nvgpu/vgpu/gp10b/vgpu_fifo_gp10b.c	30
-rw-r--r--	drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c	338
-rw-r--r--	drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.h	45
-rw-r--r--	drivers/gpu/nvgpu/vgpu/gp10b/vgpu_hal_gp10b.c	630
-rw-r--r--	drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c	203
-rw-r--r--	drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.h	45
-rw-r--r--	drivers/gpu/nvgpu/vgpu/gr_vgpu.c	1220
-rw-r--r--	drivers/gpu/nvgpu/vgpu/gr_vgpu.h	70
-rw-r--r--	drivers/gpu/nvgpu/vgpu/gv11b/platform_gv11b_vgpu_tegra.c	105
-rw-r--r--	drivers/gpu/nvgpu/vgpu/gv11b/vgpu_fifo_gv11b.c	117
-rw-r--r--	drivers/gpu/nvgpu/vgpu/gv11b/vgpu_fifo_gv11b.h	31
-rw-r--r--	drivers/gpu/nvgpu/vgpu/gv11b/vgpu_gr_gv11b.c	41
-rw-r--r--	drivers/gpu/nvgpu/vgpu/gv11b/vgpu_gr_gv11b.h	30
-rw-r--r--	drivers/gpu/nvgpu/vgpu/gv11b/vgpu_gv11b.c	37
-rw-r--r--	drivers/gpu/nvgpu/vgpu/gv11b/vgpu_gv11b.h	21
-rw-r--r--	drivers/gpu/nvgpu/vgpu/gv11b/vgpu_hal_gv11b.c	642
-rw-r--r--	drivers/gpu/nvgpu/vgpu/gv11b/vgpu_subctx_gv11b.c	79
-rw-r--r--	drivers/gpu/nvgpu/vgpu/gv11b/vgpu_subctx_gv11b.h	31
-rw-r--r--	drivers/gpu/nvgpu/vgpu/gv11b/vgpu_tsg_gv11b.c	59
-rw-r--r--	drivers/gpu/nvgpu/vgpu/gv11b/vgpu_tsg_gv11b.h	29
-rw-r--r--	drivers/gpu/nvgpu/vgpu/ltc_vgpu.c	67
-rw-r--r--	drivers/gpu/nvgpu/vgpu/ltc_vgpu.h	33
-rw-r--r--	drivers/gpu/nvgpu/vgpu/mm_vgpu.c	369
-rw-r--r--	drivers/gpu/nvgpu/vgpu/mm_vgpu.h	56
-rw-r--r--	drivers/gpu/nvgpu/vgpu/sysfs_vgpu.c	55
-rw-r--r--	drivers/gpu/nvgpu/vgpu/tsg_vgpu.c	142
-rw-r--r--	drivers/gpu/nvgpu/vgpu/vgpu.c	782
-rw-r--r--	drivers/gpu/nvgpu/vgpu/vgpu.h	194
-rw-r--r--	drivers/gpu/nvgpu/vgpu/vgpu_t19x.h	36
43 files changed, 0 insertions, 8193 deletions
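The Makefile changes mentioned in the commit message fall outside this diffstat; as a rough sketch, assuming the sources move under common/linux/vgpu/ and keep their existing build guard (shown here as CONFIG_TEGRA_GR_VIRTUALIZATION purely for illustration), the object list would be updated along these lines:

	-nvgpu-$(CONFIG_TEGRA_GR_VIRTUALIZATION) += vgpu/vgpu.o
	+nvgpu-$(CONFIG_TEGRA_GR_VIRTUALIZATION) += common/linux/vgpu/vgpu.o	# assumed new object path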
diff --git a/drivers/gpu/nvgpu/vgpu/ce2_vgpu.c b/drivers/gpu/nvgpu/vgpu/ce2_vgpu.c
deleted file mode 100644
index bd225f0c..00000000
--- a/drivers/gpu/nvgpu/vgpu/ce2_vgpu.c
+++ /dev/null
@@ -1,51 +0,0 @@
1/*
2 * Virtualized GPU CE2
3 *
4 * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include "vgpu/vgpu.h"
26
27#include <nvgpu/bug.h>
28
29int vgpu_ce2_nonstall_isr(struct gk20a *g,
30 struct tegra_vgpu_ce2_nonstall_intr_info *info)
31{
32 gk20a_dbg_fn("");
33
34 switch (info->type) {
35 case TEGRA_VGPU_CE2_NONSTALL_INTR_NONBLOCKPIPE:
36 gk20a_channel_semaphore_wakeup(g, true);
37 break;
38 default:
39 WARN_ON(1);
40 break;
41 }
42
43 return 0;
44}
45
46u32 vgpu_ce_get_num_pce(struct gk20a *g)
47{
48 struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
49
50 return priv->constants.num_pce;
51}
diff --git a/drivers/gpu/nvgpu/vgpu/clk_vgpu.c b/drivers/gpu/nvgpu/vgpu/clk_vgpu.c
deleted file mode 100644
index e4ad8f68..00000000
--- a/drivers/gpu/nvgpu/vgpu/clk_vgpu.c
+++ /dev/null
@@ -1,170 +0,0 @@
1/*
2 * Virtualized GPU Clock Interface
3 *
4 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include "vgpu/vgpu.h"
26#include "vgpu/clk_vgpu.h"
27
28static unsigned long
29vgpu_freq_table[TEGRA_VGPU_GPU_FREQ_TABLE_SIZE];
30
31static unsigned long vgpu_clk_get_rate(struct gk20a *g, u32 api_domain)
32{
33 struct tegra_vgpu_cmd_msg msg = {};
34 struct tegra_vgpu_gpu_clk_rate_params *p = &msg.params.gpu_clk_rate;
35 int err;
36 unsigned long ret = 0;
37
38 gk20a_dbg_fn("");
39
40 switch (api_domain) {
41 case CTRL_CLK_DOMAIN_GPCCLK:
42 msg.cmd = TEGRA_VGPU_CMD_GET_GPU_CLK_RATE;
43 msg.handle = vgpu_get_handle(g);
44 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
45 err = err ? err : msg.ret;
46 if (err)
47 nvgpu_err(g, "%s failed - %d", __func__, err);
48 else
49 /* return frequency in Hz */
50 ret = p->rate * 1000;
51 break;
52 case CTRL_CLK_DOMAIN_PWRCLK:
53 nvgpu_err(g, "unsupported clock: %u", api_domain);
54 break;
55 default:
56 nvgpu_err(g, "unknown clock: %u", api_domain);
57 break;
58 }
59
60 return ret;
61}
62
63static int vgpu_clk_set_rate(struct gk20a *g,
64 u32 api_domain, unsigned long rate)
65{
66 struct tegra_vgpu_cmd_msg msg = {};
67 struct tegra_vgpu_gpu_clk_rate_params *p = &msg.params.gpu_clk_rate;
68 int err = -EINVAL;
69
70 gk20a_dbg_fn("");
71
72 switch (api_domain) {
73 case CTRL_CLK_DOMAIN_GPCCLK:
74 msg.cmd = TEGRA_VGPU_CMD_SET_GPU_CLK_RATE;
75 msg.handle = vgpu_get_handle(g);
76
77 /* server dvfs framework requires frequency in kHz */
78 p->rate = (u32)(rate / 1000);
79 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
80 err = err ? err : msg.ret;
81 if (err)
82 nvgpu_err(g, "%s failed - %d", __func__, err);
83 break;
84 case CTRL_CLK_DOMAIN_PWRCLK:
85 nvgpu_err(g, "unsupported clock: %u", api_domain);
86 break;
87 default:
88 nvgpu_err(g, "unknown clock: %u", api_domain);
89 break;
90 }
91
92 return err;
93}
94
95static unsigned long vgpu_clk_get_maxrate(struct gk20a *g, u32 api_domain)
96{
97 struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
98
99 return priv->constants.max_freq;
100}
101
102void vgpu_init_clk_support(struct gk20a *g)
103{
104 g->ops.clk.get_rate = vgpu_clk_get_rate;
105 g->ops.clk.set_rate = vgpu_clk_set_rate;
106 g->ops.clk.get_maxrate = vgpu_clk_get_maxrate;
107}
108
109long vgpu_clk_round_rate(struct device *dev, unsigned long rate)
110{
111 /* server will handle frequency rounding */
112 return rate;
113}
114
115int vgpu_clk_get_freqs(struct device *dev,
116 unsigned long **freqs, int *num_freqs)
117{
118 struct gk20a_platform *platform = gk20a_get_platform(dev);
119 struct gk20a *g = platform->g;
120 struct tegra_vgpu_cmd_msg msg = {};
121 struct tegra_vgpu_get_gpu_freq_table_params *p =
122 &msg.params.get_gpu_freq_table;
123 unsigned int i;
124 int err;
125
126 gk20a_dbg_fn("");
127
128 msg.cmd = TEGRA_VGPU_CMD_GET_GPU_FREQ_TABLE;
129 msg.handle = vgpu_get_handle(g);
130
131 p->num_freqs = TEGRA_VGPU_GPU_FREQ_TABLE_SIZE;
132 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
133 err = err ? err : msg.ret;
134 if (err) {
135 nvgpu_err(g, "%s failed - %d", __func__, err);
136 return err;
137 }
138
139 /* return frequency in Hz */
140 for (i = 0; i < p->num_freqs; i++)
141 vgpu_freq_table[i] = p->freqs[i] * 1000;
142
143 *freqs = vgpu_freq_table;
144 *num_freqs = p->num_freqs;
145
146 return 0;
147}
148
149int vgpu_clk_cap_rate(struct device *dev, unsigned long rate)
150{
151 struct gk20a_platform *platform = gk20a_get_platform(dev);
152 struct gk20a *g = platform->g;
153 struct tegra_vgpu_cmd_msg msg = {};
154 struct tegra_vgpu_gpu_clk_rate_params *p = &msg.params.gpu_clk_rate;
155 int err = 0;
156
157 gk20a_dbg_fn("");
158
159 msg.cmd = TEGRA_VGPU_CMD_CAP_GPU_CLK_RATE;
160 msg.handle = vgpu_get_handle(g);
161 p->rate = (u32)rate;
162 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
163 err = err ? err : msg.ret;
164 if (err) {
165 nvgpu_err(g, "%s failed - %d", __func__, err);
166 return err;
167 }
168
169 return 0;
170}
diff --git a/drivers/gpu/nvgpu/vgpu/clk_vgpu.h b/drivers/gpu/nvgpu/vgpu/clk_vgpu.h
deleted file mode 100644
index 58ab5755..00000000
--- a/drivers/gpu/nvgpu/vgpu/clk_vgpu.h
+++ /dev/null
@@ -1,33 +0,0 @@
1/*
2 * Virtualized GPU Clock Interface
3 *
4 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#ifndef _CLK_VIRT_H_
26#define _CLK_VIRT_H_
27
28void vgpu_init_clk_support(struct gk20a *g);
29long vgpu_clk_round_rate(struct device *dev, unsigned long rate);
30int vgpu_clk_get_freqs(struct device *dev,
31 unsigned long **freqs, int *num_freqs);
32int vgpu_clk_cap_rate(struct device *dev, unsigned long rate);
33#endif
diff --git a/drivers/gpu/nvgpu/vgpu/css_vgpu.c b/drivers/gpu/nvgpu/vgpu/css_vgpu.c
deleted file mode 100644
index 7362fc6f..00000000
--- a/drivers/gpu/nvgpu/vgpu/css_vgpu.c
+++ /dev/null
@@ -1,240 +0,0 @@
1/*
2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22#if defined(CONFIG_GK20A_CYCLE_STATS)
23
24#include <linux/tegra-ivc.h>
25#include <linux/tegra_vgpu.h>
26#include <uapi/linux/nvgpu.h>
27
28#include "gk20a/gk20a.h"
29#include "gk20a/channel_gk20a.h"
30#include "gk20a/css_gr_gk20a.h"
31#include "common/linux/platform_gk20a.h"
32#include "vgpu.h"
33#include "css_vgpu.h"
34
35static struct tegra_hv_ivm_cookie *css_cookie;
36
37static struct tegra_hv_ivm_cookie *vgpu_css_reserve_mempool(struct gk20a *g)
38{
39 struct device *dev = dev_from_gk20a(g);
40 struct device_node *np = dev->of_node;
41 struct of_phandle_args args;
42 struct device_node *hv_np;
43 struct tegra_hv_ivm_cookie *cookie;
44 u32 mempool;
45 int err;
46
47 err = of_parse_phandle_with_fixed_args(np,
48 "mempool-css", 1, 0, &args);
49 if (err) {
50 nvgpu_err(g, "dt missing mempool-css");
51 return ERR_PTR(err);
52 }
53
54 hv_np = args.np;
55 mempool = args.args[0];
56 cookie = tegra_hv_mempool_reserve(hv_np, mempool);
57 if (IS_ERR_OR_NULL(cookie)) {
58 nvgpu_err(g, "mempool %u reserve failed", mempool);
59 return ERR_PTR(-EINVAL);
60 }
61 return cookie;
62}
63
64u32 vgpu_css_get_buffer_size(struct gk20a *g)
65{
66 struct tegra_hv_ivm_cookie *cookie;
67 u32 size;
68
69 nvgpu_log_fn(g, " ");
70
71 if (css_cookie) {
72 nvgpu_log_info(g, "buffer size = %llu", css_cookie->size);
73 return (u32)css_cookie->size;
74 }
75
76 cookie = vgpu_css_reserve_mempool(g);
77 if (IS_ERR(cookie))
78 return 0;
79
80 size = cookie->size;
81
82 tegra_hv_mempool_unreserve(cookie);
83 nvgpu_log_info(g, "buffer size = %u", size);
84 return size;
85}
86
87static int vgpu_css_init_snapshot_buffer(struct gr_gk20a *gr)
88{
89 struct gk20a *g = gr->g;
90 struct gk20a_cs_snapshot *data = gr->cs_data;
91 void *buf = NULL;
92 int err;
93
94 gk20a_dbg_fn("");
95
96 if (data->hw_snapshot)
97 return 0;
98
99 css_cookie = vgpu_css_reserve_mempool(g);
100 if (IS_ERR(css_cookie))
101 return PTR_ERR(css_cookie);
102
103 /* Make sure buffer size is large enough */
104 if (css_cookie->size < CSS_MIN_HW_SNAPSHOT_SIZE) {
105 nvgpu_info(g, "mempool size %lld too small",
106 css_cookie->size);
107 err = -ENOMEM;
108 goto fail;
109 }
110
111 buf = ioremap_cache(css_cookie->ipa, css_cookie->size);
112 if (!buf) {
113 nvgpu_info(g, "ioremap_cache failed");
114 err = -EINVAL;
115 goto fail;
116 }
117
118 data->hw_snapshot = buf;
119 data->hw_end = data->hw_snapshot +
120 css_cookie->size / sizeof(struct gk20a_cs_snapshot_fifo_entry);
121 data->hw_get = data->hw_snapshot;
122 memset(data->hw_snapshot, 0xff, css_cookie->size);
123 return 0;
124fail:
125 tegra_hv_mempool_unreserve(css_cookie);
126 css_cookie = NULL;
127 return err;
128}
129
130void vgpu_css_release_snapshot_buffer(struct gr_gk20a *gr)
131{
132 struct gk20a_cs_snapshot *data = gr->cs_data;
133
134 if (!data->hw_snapshot)
135 return;
136
137 iounmap(data->hw_snapshot);
138 data->hw_snapshot = NULL;
139
140 tegra_hv_mempool_unreserve(css_cookie);
141 css_cookie = NULL;
142
143 gk20a_dbg_info("cyclestats(vgpu): buffer for snapshots released\n");
144}
145
146int vgpu_css_flush_snapshots(struct channel_gk20a *ch,
147 u32 *pending, bool *hw_overflow)
148{
149 struct gk20a *g = ch->g;
150 struct tegra_vgpu_cmd_msg msg = {};
151 struct tegra_vgpu_channel_cyclestats_snapshot_params *p;
152 struct gr_gk20a *gr = &g->gr;
153 struct gk20a_cs_snapshot *data = gr->cs_data;
154 int err;
155
156 gk20a_dbg_fn("");
157
158 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_CYCLESTATS_SNAPSHOT;
159 msg.handle = vgpu_get_handle(g);
160 p = &msg.params.cyclestats_snapshot;
161 p->handle = ch->virt_ctx;
162 p->subcmd = NVGPU_IOCTL_CHANNEL_CYCLE_STATS_SNAPSHOT_CMD_FLUSH;
163 p->buf_info = (uintptr_t)data->hw_get - (uintptr_t)data->hw_snapshot;
164
165 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
166
167 err = (err || msg.ret) ? -1 : 0;
168
169 *pending = p->buf_info;
170 *hw_overflow = p->hw_overflow;
171
172 return err;
173}
174
175static int vgpu_css_attach(struct channel_gk20a *ch,
176 struct gk20a_cs_snapshot_client *cs_client)
177{
178 struct gk20a *g = ch->g;
179 struct tegra_vgpu_cmd_msg msg = {};
180 struct tegra_vgpu_channel_cyclestats_snapshot_params *p =
181 &msg.params.cyclestats_snapshot;
182 int err;
183
184 gk20a_dbg_fn("");
185
186 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_CYCLESTATS_SNAPSHOT;
187 msg.handle = vgpu_get_handle(g);
188 p->handle = ch->virt_ctx;
189 p->subcmd = NVGPU_IOCTL_CHANNEL_CYCLE_STATS_SNAPSHOT_CMD_ATTACH;
190 p->perfmon_count = cs_client->perfmon_count;
191
192 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
193 err = err ? err : msg.ret;
194 if (err)
195 nvgpu_err(g, "failed");
196 else
197 cs_client->perfmon_start = p->perfmon_start;
198
199 return err;
200}
201
202int vgpu_css_detach(struct channel_gk20a *ch,
203 struct gk20a_cs_snapshot_client *cs_client)
204{
205 struct gk20a *g = ch->g;
206 struct tegra_vgpu_cmd_msg msg = {};
207 struct tegra_vgpu_channel_cyclestats_snapshot_params *p =
208 &msg.params.cyclestats_snapshot;
209 int err;
210
211 gk20a_dbg_fn("");
212
213 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_CYCLESTATS_SNAPSHOT;
214 msg.handle = vgpu_get_handle(g);
215 p->handle = ch->virt_ctx;
216 p->subcmd = NVGPU_IOCTL_CHANNEL_CYCLE_STATS_SNAPSHOT_CMD_DETACH;
217 p->perfmon_start = cs_client->perfmon_start;
218 p->perfmon_count = cs_client->perfmon_count;
219
220 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
221 err = err ? err : msg.ret;
222 if (err)
223 nvgpu_err(g, "failed");
224
225 return err;
226}
227
228int vgpu_css_enable_snapshot_buffer(struct channel_gk20a *ch,
229 struct gk20a_cs_snapshot_client *cs_client)
230{
231 int ret;
232
233 ret = vgpu_css_attach(ch, cs_client);
234 if (ret)
235 return ret;
236
237 ret = vgpu_css_init_snapshot_buffer(&ch->g->gr);
238 return ret;
239}
240#endif /* CONFIG_GK20A_CYCLE_STATS */
diff --git a/drivers/gpu/nvgpu/vgpu/css_vgpu.h b/drivers/gpu/nvgpu/vgpu/css_vgpu.h
deleted file mode 100644
index 8c92d571..00000000
--- a/drivers/gpu/nvgpu/vgpu/css_vgpu.h
+++ /dev/null
@@ -1,40 +0,0 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef _CSS_VGPU_H_
24#define _CSS_VGPU_H_
25
26#include <nvgpu/types.h>
27
28struct gr_gk20a;
29struct channel_gk20a;
30struct gk20a_cs_snapshot_client;
31
32void vgpu_css_release_snapshot_buffer(struct gr_gk20a *gr);
33int vgpu_css_flush_snapshots(struct channel_gk20a *ch,
34 u32 *pending, bool *hw_overflow);
35int vgpu_css_detach(struct channel_gk20a *ch,
36 struct gk20a_cs_snapshot_client *cs_client);
37int vgpu_css_enable_snapshot_buffer(struct channel_gk20a *ch,
38 struct gk20a_cs_snapshot_client *cs_client);
39u32 vgpu_css_get_buffer_size(struct gk20a *g);
40#endif
diff --git a/drivers/gpu/nvgpu/vgpu/dbg_vgpu.c b/drivers/gpu/nvgpu/vgpu/dbg_vgpu.c
deleted file mode 100644
index 4879c2eb..00000000
--- a/drivers/gpu/nvgpu/vgpu/dbg_vgpu.c
+++ /dev/null
@@ -1,216 +0,0 @@
1/*
2 * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#include <linux/tegra_gr_comm.h>
24#include <linux/tegra_vgpu.h>
25#include <uapi/linux/nvgpu.h>
26
27#include "gk20a/gk20a.h"
28#include "gk20a/channel_gk20a.h"
29#include "gk20a/dbg_gpu_gk20a.h"
30#include "vgpu.h"
31#include "dbg_vgpu.h"
32
33#include <nvgpu/bug.h>
34
35int vgpu_exec_regops(struct dbg_session_gk20a *dbg_s,
36 struct nvgpu_dbg_gpu_reg_op *ops,
37 u64 num_ops)
38{
39 struct channel_gk20a *ch;
40 struct tegra_vgpu_cmd_msg msg;
41 struct tegra_vgpu_reg_ops_params *p = &msg.params.reg_ops;
42 void *oob;
43 size_t oob_size, ops_size;
44 void *handle = NULL;
45 int err = 0;
46
47 gk20a_dbg_fn("");
48 BUG_ON(sizeof(*ops) != sizeof(struct tegra_vgpu_reg_op));
49
50 handle = tegra_gr_comm_oob_get_ptr(TEGRA_GR_COMM_CTX_CLIENT,
51 tegra_gr_comm_get_server_vmid(),
52 TEGRA_VGPU_QUEUE_CMD,
53 &oob, &oob_size);
54 if (!handle)
55 return -EINVAL;
56
57 ops_size = sizeof(*ops) * num_ops;
58 if (oob_size < ops_size) {
59 err = -ENOMEM;
60 goto fail;
61 }
62
63 memcpy(oob, ops, ops_size);
64
65 msg.cmd = TEGRA_VGPU_CMD_REG_OPS;
66 msg.handle = vgpu_get_handle(dbg_s->g);
67 ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
68 p->handle = ch ? ch->virt_ctx : 0;
69 p->num_ops = num_ops;
70 p->is_profiler = dbg_s->is_profiler;
71 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
72 err = err ? err : msg.ret;
73 if (!err)
74 memcpy(ops, oob, ops_size);
75
76fail:
77 tegra_gr_comm_oob_put_ptr(handle);
78 return err;
79}
80
81int vgpu_dbg_set_powergate(struct dbg_session_gk20a *dbg_s, bool disable_powergate)
82{
83 struct tegra_vgpu_cmd_msg msg;
84 struct tegra_vgpu_set_powergate_params *p = &msg.params.set_powergate;
85 int err = 0;
86 u32 mode;
87
88 gk20a_dbg_fn("");
89
90 /* Just return if requested mode is the same as the session's mode */
91 if (disable_powergate) {
92 if (dbg_s->is_pg_disabled)
93 return 0;
94 dbg_s->is_pg_disabled = true;
95 mode = NVGPU_DBG_GPU_POWERGATE_MODE_DISABLE;
96 } else {
97 if (!dbg_s->is_pg_disabled)
98 return 0;
99 dbg_s->is_pg_disabled = false;
100 mode = NVGPU_DBG_GPU_POWERGATE_MODE_ENABLE;
101 }
102
103 msg.cmd = TEGRA_VGPU_CMD_SET_POWERGATE;
104 msg.handle = vgpu_get_handle(dbg_s->g);
105 p->mode = mode;
106 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
107 err = err ? err : msg.ret;
108 return err;
109}
110
111static int vgpu_sendrecv_prof_cmd(struct dbg_session_gk20a *dbg_s, u32 mode)
112{
113 struct tegra_vgpu_cmd_msg msg;
114 struct tegra_vgpu_prof_mgt_params *p = &msg.params.prof_management;
115 int err = 0;
116
117 msg.cmd = TEGRA_VGPU_CMD_PROF_MGT;
118 msg.handle = vgpu_get_handle(dbg_s->g);
119
120 p->mode = mode;
121
122 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
123 err = err ? err : msg.ret;
124 return err;
125}
126
127bool vgpu_check_and_set_global_reservation(
128 struct dbg_session_gk20a *dbg_s,
129 struct dbg_profiler_object_data *prof_obj)
130{
131 struct gk20a *g = dbg_s->g;
132
133 if (g->profiler_reservation_count > 0)
134 return false;
135
136 /* Check that another guest OS doesn't already have a reservation */
137 if (!vgpu_sendrecv_prof_cmd(dbg_s, TEGRA_VGPU_PROF_GET_GLOBAL)) {
138 g->global_profiler_reservation_held = true;
139 g->profiler_reservation_count = 1;
140 dbg_s->has_profiler_reservation = true;
141 prof_obj->has_reservation = true;
142 return true;
143 }
144 return false;
145}
146
147bool vgpu_check_and_set_context_reservation(
148 struct dbg_session_gk20a *dbg_s,
149 struct dbg_profiler_object_data *prof_obj)
150{
151 struct gk20a *g = dbg_s->g;
152
153 /* Assumes that we've already checked that no global reservation
154 * is in effect for this guest.
155 *
156 * If our reservation count is non-zero, then no other guest has the
157 * global reservation; if it is zero, need to check with RM server.
158 *
159 */
160 if ((g->profiler_reservation_count != 0) ||
161 !vgpu_sendrecv_prof_cmd(dbg_s, TEGRA_VGPU_PROF_GET_CONTEXT)) {
162 g->profiler_reservation_count++;
163 dbg_s->has_profiler_reservation = true;
164 prof_obj->has_reservation = true;
165 return true;
166 }
167 return false;
168}
169
170void vgpu_release_profiler_reservation(
171 struct dbg_session_gk20a *dbg_s,
172 struct dbg_profiler_object_data *prof_obj)
173{
174 struct gk20a *g = dbg_s->g;
175
176 dbg_s->has_profiler_reservation = false;
177 prof_obj->has_reservation = false;
178 if (prof_obj->ch == NULL)
179 g->global_profiler_reservation_held = false;
180
181 /* If new reservation count is zero, notify server */
182 g->profiler_reservation_count--;
183 if (g->profiler_reservation_count == 0)
184 vgpu_sendrecv_prof_cmd(dbg_s, TEGRA_VGPU_PROF_RELEASE);
185}
186
187static int vgpu_sendrecv_perfbuf_cmd(struct gk20a *g, u64 offset, u32 size)
188{
189 struct mm_gk20a *mm = &g->mm;
190 struct vm_gk20a *vm = mm->perfbuf.vm;
191 struct tegra_vgpu_cmd_msg msg;
192 struct tegra_vgpu_perfbuf_mgt_params *p =
193 &msg.params.perfbuf_management;
194 int err;
195
196 msg.cmd = TEGRA_VGPU_CMD_PERFBUF_MGT;
197 msg.handle = vgpu_get_handle(g);
198
199 p->vm_handle = vm->handle;
200 p->offset = offset;
201 p->size = size;
202
203 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
204 err = err ? err : msg.ret;
205 return err;
206}
207
208int vgpu_perfbuffer_enable(struct gk20a *g, u64 offset, u32 size)
209{
210 return vgpu_sendrecv_perfbuf_cmd(g, offset, size);
211}
212
213int vgpu_perfbuffer_disable(struct gk20a *g)
214{
215 return vgpu_sendrecv_perfbuf_cmd(g, 0, 0);
216}
diff --git a/drivers/gpu/nvgpu/vgpu/dbg_vgpu.h b/drivers/gpu/nvgpu/vgpu/dbg_vgpu.h
deleted file mode 100644
index b2ca2a7b..00000000
--- a/drivers/gpu/nvgpu/vgpu/dbg_vgpu.h
+++ /dev/null
@@ -1,47 +0,0 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef _DBG_VGPU_H_
24#define _DBG_VGPU_H_
25
26struct dbg_session_gk20a;
27struct nvgpu_dbg_gpu_reg_op;
28struct dbg_profiler_object_data;
29struct gk20a;
30
31int vgpu_exec_regops(struct dbg_session_gk20a *dbg_s,
32 struct nvgpu_dbg_gpu_reg_op *ops,
33 u64 num_ops);
34int vgpu_dbg_set_powergate(struct dbg_session_gk20a *dbg_s, bool disable_powergate);
35bool vgpu_check_and_set_global_reservation(
36 struct dbg_session_gk20a *dbg_s,
37 struct dbg_profiler_object_data *prof_obj);
38bool vgpu_check_and_set_context_reservation(
39 struct dbg_session_gk20a *dbg_s,
40 struct dbg_profiler_object_data *prof_obj);
41
42void vgpu_release_profiler_reservation(
43 struct dbg_session_gk20a *dbg_s,
44 struct dbg_profiler_object_data *prof_obj);
45int vgpu_perfbuffer_enable(struct gk20a *g, u64 offset, u32 size);
46int vgpu_perfbuffer_disable(struct gk20a *g);
47#endif
diff --git a/drivers/gpu/nvgpu/vgpu/fecs_trace_vgpu.c b/drivers/gpu/nvgpu/vgpu/fecs_trace_vgpu.c
deleted file mode 100644
index dc7608ff..00000000
--- a/drivers/gpu/nvgpu/vgpu/fecs_trace_vgpu.c
+++ /dev/null
@@ -1,231 +0,0 @@
1/*
2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#include <linux/string.h>
24#include <linux/tegra-ivc.h>
25#include <linux/tegra_vgpu.h>
26
27#include <uapi/linux/nvgpu.h>
28
29#include <nvgpu/kmem.h>
30#include <nvgpu/bug.h>
31#include <nvgpu/enabled.h>
32#include <nvgpu/ctxsw_trace.h>
33
34#include "gk20a/gk20a.h"
35#include "vgpu.h"
36#include "fecs_trace_vgpu.h"
37
38struct vgpu_fecs_trace {
39 struct tegra_hv_ivm_cookie *cookie;
40 struct nvgpu_ctxsw_ring_header *header;
41 struct nvgpu_ctxsw_trace_entry *entries;
42 int num_entries;
43 bool enabled;
44 void *buf;
45};
46
47int vgpu_fecs_trace_init(struct gk20a *g)
48{
49 struct device *dev = dev_from_gk20a(g);
50 struct device_node *np = dev->of_node;
51 struct of_phandle_args args;
52 struct device_node *hv_np;
53 struct vgpu_fecs_trace *vcst;
54 u32 mempool;
55 int err;
56
57 gk20a_dbg_fn("");
58
59 vcst = nvgpu_kzalloc(g, sizeof(*vcst));
60 if (!vcst)
61 return -ENOMEM;
62
63 err = of_parse_phandle_with_fixed_args(np,
64 "mempool-fecs-trace", 1, 0, &args);
65 if (err) {
66 dev_info(dev_from_gk20a(g), "does not support fecs trace\n");
67 goto fail;
68 }
69 __nvgpu_set_enabled(g, NVGPU_SUPPORT_FECS_CTXSW_TRACE, true);
70
71 hv_np = args.np;
72 mempool = args.args[0];
73 vcst->cookie = tegra_hv_mempool_reserve(hv_np, mempool);
74 if (IS_ERR(vcst->cookie)) {
75 dev_info(dev_from_gk20a(g),
76 "mempool %u reserve failed\n", mempool);
77 vcst->cookie = NULL;
78 err = -EINVAL;
79 goto fail;
80 }
81
82 vcst->buf = ioremap_cache(vcst->cookie->ipa, vcst->cookie->size);
83 if (!vcst->buf) {
84 dev_info(dev_from_gk20a(g), "ioremap_cache failed\n");
85 err = -EINVAL;
86 goto fail;
87 }
88 vcst->header = vcst->buf;
89 vcst->num_entries = vcst->header->num_ents;
90 if (unlikely(vcst->header->ent_size != sizeof(*vcst->entries))) {
91 dev_err(dev_from_gk20a(g),
92 "entry size mismatch\n");
93 goto fail;
94 }
95 vcst->entries = vcst->buf + sizeof(*vcst->header);
96 g->fecs_trace = (struct gk20a_fecs_trace *)vcst;
97
98 return 0;
99fail:
100 iounmap(vcst->buf);
101 if (vcst->cookie)
102 tegra_hv_mempool_unreserve(vcst->cookie);
103 nvgpu_kfree(g, vcst);
104 return err;
105}
106
107int vgpu_fecs_trace_deinit(struct gk20a *g)
108{
109 struct vgpu_fecs_trace *vcst = (struct vgpu_fecs_trace *)g->fecs_trace;
110
111 iounmap(vcst->buf);
112 tegra_hv_mempool_unreserve(vcst->cookie);
113 nvgpu_kfree(g, vcst);
114 return 0;
115}
116
117int vgpu_fecs_trace_enable(struct gk20a *g)
118{
119 struct vgpu_fecs_trace *vcst = (struct vgpu_fecs_trace *)g->fecs_trace;
120 struct tegra_vgpu_cmd_msg msg = {
121 .cmd = TEGRA_VGPU_CMD_FECS_TRACE_ENABLE,
122 .handle = vgpu_get_handle(g),
123 };
124 int err;
125
126 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
127 err = err ? err : msg.ret;
128 WARN_ON(err);
129 vcst->enabled = !err;
130 return err;
131}
132
133int vgpu_fecs_trace_disable(struct gk20a *g)
134{
135 struct vgpu_fecs_trace *vcst = (struct vgpu_fecs_trace *)g->fecs_trace;
136 struct tegra_vgpu_cmd_msg msg = {
137 .cmd = TEGRA_VGPU_CMD_FECS_TRACE_DISABLE,
138 .handle = vgpu_get_handle(g),
139 };
140 int err;
141
142 vcst->enabled = false;
143 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
144 err = err ? err : msg.ret;
145 WARN_ON(err);
146 return err;
147}
148
149bool vgpu_fecs_trace_is_enabled(struct gk20a *g)
150{
151 struct vgpu_fecs_trace *vcst = (struct vgpu_fecs_trace *)g->fecs_trace;
152
153 return (vcst && vcst->enabled);
154}
155
156int vgpu_fecs_trace_poll(struct gk20a *g)
157{
158 struct tegra_vgpu_cmd_msg msg = {
159 .cmd = TEGRA_VGPU_CMD_FECS_TRACE_POLL,
160 .handle = vgpu_get_handle(g),
161 };
162 int err;
163
164 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
165 err = err ? err : msg.ret;
166 WARN_ON(err);
167 return err;
168}
169
170int vgpu_alloc_user_buffer(struct gk20a *g, void **buf, size_t *size)
171{
172 struct vgpu_fecs_trace *vcst = (struct vgpu_fecs_trace *)g->fecs_trace;
173
174 *buf = vcst->buf;
175 *size = vcst->cookie->size;
176 return 0;
177}
178
179int vgpu_free_user_buffer(struct gk20a *g)
180{
181 return 0;
182}
183
184int vgpu_mmap_user_buffer(struct gk20a *g, struct vm_area_struct *vma)
185{
186 struct vgpu_fecs_trace *vcst = (struct vgpu_fecs_trace *)g->fecs_trace;
187 unsigned long size = vcst->cookie->size;
188 unsigned long vsize = vma->vm_end - vma->vm_start;
189
190 size = min(size, vsize);
191 size = round_up(size, PAGE_SIZE);
192
193 return remap_pfn_range(vma, vma->vm_start,
194 vcst->cookie->ipa >> PAGE_SHIFT,
195 size,
196 vma->vm_page_prot);
197}
198
199int vgpu_fecs_trace_max_entries(struct gk20a *g,
200 struct nvgpu_ctxsw_trace_filter *filter)
201{
202 struct vgpu_fecs_trace *vcst = (struct vgpu_fecs_trace *)g->fecs_trace;
203
204 return vcst->header->num_ents;
205}
206
207#if NVGPU_CTXSW_FILTER_SIZE != TEGRA_VGPU_FECS_TRACE_FILTER_SIZE
208#error "FECS trace filter size mismatch!"
209#endif
210
211int vgpu_fecs_trace_set_filter(struct gk20a *g,
212 struct nvgpu_ctxsw_trace_filter *filter)
213{
214 struct tegra_vgpu_cmd_msg msg = {
215 .cmd = TEGRA_VGPU_CMD_FECS_TRACE_SET_FILTER,
216 .handle = vgpu_get_handle(g),
217 };
218 struct tegra_vgpu_fecs_trace_filter *p = &msg.params.fecs_trace_filter;
219 int err;
220
221 memcpy(&p->tag_bits, &filter->tag_bits, sizeof(p->tag_bits));
222 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
223 err = err ? err : msg.ret;
224 WARN_ON(err);
225 return err;
226}
227
228void vgpu_fecs_trace_data_update(struct gk20a *g)
229{
230 gk20a_ctxsw_trace_wake_up(g, 0);
231}
diff --git a/drivers/gpu/nvgpu/vgpu/fecs_trace_vgpu.h b/drivers/gpu/nvgpu/vgpu/fecs_trace_vgpu.h
deleted file mode 100644
index 392b344c..00000000
--- a/drivers/gpu/nvgpu/vgpu/fecs_trace_vgpu.h
+++ /dev/null
@@ -1,47 +0,0 @@
1/*
2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef __FECS_TRACE_VGPU_H
24#define __FECS_TRACE_VGPU_H
25
26#include <nvgpu/types.h>
27
28struct gk20a;
29struct vm_area_struct;
30struct nvgpu_ctxsw_trace_filter;
31
32void vgpu_fecs_trace_data_update(struct gk20a *g);
33int vgpu_fecs_trace_init(struct gk20a *g);
34int vgpu_fecs_trace_deinit(struct gk20a *g);
35int vgpu_fecs_trace_enable(struct gk20a *g);
36int vgpu_fecs_trace_disable(struct gk20a *g);
37bool vgpu_fecs_trace_is_enabled(struct gk20a *g);
38int vgpu_fecs_trace_poll(struct gk20a *g);
39int vgpu_alloc_user_buffer(struct gk20a *g, void **buf, size_t *size);
40int vgpu_free_user_buffer(struct gk20a *g);
41int vgpu_mmap_user_buffer(struct gk20a *g, struct vm_area_struct *vma);
42int vgpu_fecs_trace_max_entries(struct gk20a *g,
43 struct nvgpu_ctxsw_trace_filter *filter);
44int vgpu_fecs_trace_set_filter(struct gk20a *g,
45 struct nvgpu_ctxsw_trace_filter *filter);
46
47#endif /* __FECS_TRACE_VGPU_H */
diff --git a/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c b/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
deleted file mode 100644
index cc56f9f8..00000000
--- a/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
+++ /dev/null
@@ -1,828 +0,0 @@
1/*
2 * Virtualized GPU Fifo
3 *
4 * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include <linux/dma-mapping.h>
26#include <trace/events/gk20a.h>
27#include <uapi/linux/nvgpu.h>
28
29#include <nvgpu/kmem.h>
30#include <nvgpu/dma.h>
31#include <nvgpu/atomic.h>
32#include <nvgpu/bug.h>
33#include <nvgpu/barrier.h>
34
35#include "vgpu/vgpu.h"
36#include "vgpu/fifo_vgpu.h"
37
38#include <nvgpu/hw/gk20a/hw_fifo_gk20a.h>
39#include <nvgpu/hw/gk20a/hw_ram_gk20a.h>
40
41void vgpu_channel_bind(struct channel_gk20a *ch)
42{
43 struct tegra_vgpu_cmd_msg msg;
44 struct tegra_vgpu_channel_config_params *p =
45 &msg.params.channel_config;
46 int err;
47
48 gk20a_dbg_info("bind channel %d", ch->chid);
49
50 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_BIND;
51 msg.handle = vgpu_get_handle(ch->g);
52 p->handle = ch->virt_ctx;
53 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
54 WARN_ON(err || msg.ret);
55
56 nvgpu_smp_wmb();
57 nvgpu_atomic_set(&ch->bound, true);
58}
59
60void vgpu_channel_unbind(struct channel_gk20a *ch)
61{
62
63 gk20a_dbg_fn("");
64
65 if (nvgpu_atomic_cmpxchg(&ch->bound, true, false)) {
66 struct tegra_vgpu_cmd_msg msg;
67 struct tegra_vgpu_channel_config_params *p =
68 &msg.params.channel_config;
69 int err;
70
71 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_UNBIND;
72 msg.handle = vgpu_get_handle(ch->g);
73 p->handle = ch->virt_ctx;
74 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
75 WARN_ON(err || msg.ret);
76 }
77
78}
79
80int vgpu_channel_alloc_inst(struct gk20a *g, struct channel_gk20a *ch)
81{
82 struct tegra_vgpu_cmd_msg msg;
83 struct tegra_vgpu_channel_hwctx_params *p = &msg.params.channel_hwctx;
84 int err;
85
86 gk20a_dbg_fn("");
87
88 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_ALLOC_HWCTX;
89 msg.handle = vgpu_get_handle(g);
90 p->id = ch->chid;
91 p->pid = (u64)current->tgid;
92 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
93 if (err || msg.ret) {
94 nvgpu_err(g, "fail");
95 return -ENOMEM;
96 }
97
98 ch->virt_ctx = p->handle;
99 gk20a_dbg_fn("done");
100 return 0;
101}
102
103void vgpu_channel_free_inst(struct gk20a *g, struct channel_gk20a *ch)
104{
105 struct tegra_vgpu_cmd_msg msg;
106 struct tegra_vgpu_channel_hwctx_params *p = &msg.params.channel_hwctx;
107 int err;
108
109 gk20a_dbg_fn("");
110
111 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_FREE_HWCTX;
112 msg.handle = vgpu_get_handle(g);
113 p->handle = ch->virt_ctx;
114 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
115 WARN_ON(err || msg.ret);
116}
117
118void vgpu_channel_enable(struct channel_gk20a *ch)
119{
120 struct tegra_vgpu_cmd_msg msg;
121 struct tegra_vgpu_channel_config_params *p =
122 &msg.params.channel_config;
123 int err;
124
125 gk20a_dbg_fn("");
126
127 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_ENABLE;
128 msg.handle = vgpu_get_handle(ch->g);
129 p->handle = ch->virt_ctx;
130 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
131 WARN_ON(err || msg.ret);
132}
133
134void vgpu_channel_disable(struct channel_gk20a *ch)
135{
136 struct tegra_vgpu_cmd_msg msg;
137 struct tegra_vgpu_channel_config_params *p =
138 &msg.params.channel_config;
139 int err;
140
141 gk20a_dbg_fn("");
142
143 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_DISABLE;
144 msg.handle = vgpu_get_handle(ch->g);
145 p->handle = ch->virt_ctx;
146 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
147 WARN_ON(err || msg.ret);
148}
149
150int vgpu_channel_setup_ramfc(struct channel_gk20a *ch, u64 gpfifo_base,
151 u32 gpfifo_entries,
152 unsigned long acquire_timeout, u32 flags)
153{
154 struct device __maybe_unused *d = dev_from_gk20a(ch->g);
155 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(d);
156 struct tegra_vgpu_cmd_msg msg;
157 struct tegra_vgpu_ramfc_params *p = &msg.params.ramfc;
158 int err;
159
160 gk20a_dbg_fn("");
161
162 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_SETUP_RAMFC;
163 msg.handle = vgpu_get_handle(ch->g);
164 p->handle = ch->virt_ctx;
165 p->gpfifo_va = gpfifo_base;
166 p->num_entries = gpfifo_entries;
167 p->userd_addr = ch->userd_iova;
168 p->iova = mapping ? 1 : 0;
169 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
170
171 return (err || msg.ret) ? -ENOMEM : 0;
172}
173
174int vgpu_fifo_init_engine_info(struct fifo_gk20a *f)
175{
176 struct vgpu_priv_data *priv = vgpu_get_priv_data(f->g);
177 struct tegra_vgpu_engines_info *engines = &priv->constants.engines_info;
178 u32 i;
179
180 gk20a_dbg_fn("");
181
182 if (engines->num_engines > TEGRA_VGPU_MAX_ENGINES) {
183 nvgpu_err(f->g, "num_engines %d larger than max %d",
184 engines->num_engines, TEGRA_VGPU_MAX_ENGINES);
185 return -EINVAL;
186 }
187
188 f->num_engines = engines->num_engines;
189 for (i = 0; i < f->num_engines; i++) {
190 struct fifo_engine_info_gk20a *info =
191 &f->engine_info[engines->info[i].engine_id];
192
193 if (engines->info[i].engine_id >= f->max_engines) {
194 nvgpu_err(f->g, "engine id %d larger than max %d",
195 engines->info[i].engine_id,
196 f->max_engines);
197 return -EINVAL;
198 }
199
200 info->intr_mask = engines->info[i].intr_mask;
201 info->reset_mask = engines->info[i].reset_mask;
202 info->runlist_id = engines->info[i].runlist_id;
203 info->pbdma_id = engines->info[i].pbdma_id;
204 info->inst_id = engines->info[i].inst_id;
205 info->pri_base = engines->info[i].pri_base;
206 info->engine_enum = engines->info[i].engine_enum;
207 info->fault_id = engines->info[i].fault_id;
208 f->active_engines_list[i] = engines->info[i].engine_id;
209 }
210
211 gk20a_dbg_fn("done");
212
213 return 0;
214}
215
216static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
217{
218 struct fifo_runlist_info_gk20a *runlist;
219 struct device *d = dev_from_gk20a(g);
220 unsigned int runlist_id = -1;
221 u32 i;
222 u64 runlist_size;
223
224 gk20a_dbg_fn("");
225
226 f->max_runlists = g->ops.fifo.eng_runlist_base_size();
227 f->runlist_info = nvgpu_kzalloc(g,
228 sizeof(struct fifo_runlist_info_gk20a) *
229 f->max_runlists);
230 if (!f->runlist_info)
231 goto clean_up_runlist;
232
233 memset(f->runlist_info, 0, (sizeof(struct fifo_runlist_info_gk20a) *
234 f->max_runlists));
235
236 for (runlist_id = 0; runlist_id < f->max_runlists; runlist_id++) {
237 runlist = &f->runlist_info[runlist_id];
238
239 runlist->active_channels =
240 nvgpu_kzalloc(g, DIV_ROUND_UP(f->num_channels,
241 BITS_PER_BYTE));
242 if (!runlist->active_channels)
243 goto clean_up_runlist;
244
245 runlist_size = sizeof(u16) * f->num_channels;
246 for (i = 0; i < MAX_RUNLIST_BUFFERS; i++) {
247 int err = nvgpu_dma_alloc_sys(g, runlist_size,
248 &runlist->mem[i]);
249 if (err) {
250 dev_err(d, "memory allocation failed\n");
251 goto clean_up_runlist;
252 }
253 }
254 nvgpu_mutex_init(&runlist->mutex);
255
256 /* None of buffers is pinned if this value doesn't change.
257 Otherwise, one of them (cur_buffer) must have been pinned. */
258 runlist->cur_buffer = MAX_RUNLIST_BUFFERS;
259 }
260
261 gk20a_dbg_fn("done");
262 return 0;
263
264clean_up_runlist:
265 gk20a_fifo_delete_runlist(f);
266 gk20a_dbg_fn("fail");
267 return -ENOMEM;
268}
269
270static int vgpu_init_fifo_setup_sw(struct gk20a *g)
271{
272 struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
273 struct fifo_gk20a *f = &g->fifo;
274 struct device *d = dev_from_gk20a(g);
275 struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
276 unsigned int chid;
277 int err = 0;
278
279 gk20a_dbg_fn("");
280
281 if (f->sw_ready) {
282 gk20a_dbg_fn("skip init");
283 return 0;
284 }
285
286 f->g = g;
287 f->num_channels = priv->constants.num_channels;
288 f->max_engines = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_ENGINES);
289
290 f->userd_entry_size = 1 << ram_userd_base_shift_v();
291
292 err = nvgpu_dma_alloc_sys(g, f->userd_entry_size * f->num_channels,
293 &f->userd);
294 if (err) {
295 dev_err(d, "memory allocation failed\n");
296 goto clean_up;
297 }
298
299 /* bar1 va */
300 if (g->ops.mm.is_bar1_supported(g)) {
301 f->userd.gpu_va = vgpu_bar1_map(g, &f->userd.priv.sgt,
302 f->userd.size);
303 if (!f->userd.gpu_va) {
304 dev_err(d, "gmmu mapping failed\n");
305 goto clean_up;
306 }
307 /* if reduced BAR1 range is specified, use offset of 0
308 * (server returns offset assuming full BAR1 range)
309 */
310 if (resource_size(l->bar1_mem) ==
311 (resource_size_t)f->userd.size)
312 f->userd.gpu_va = 0;
313 }
314
315 gk20a_dbg(gpu_dbg_map_v, "userd bar1 va = 0x%llx", f->userd.gpu_va);
316
317 f->channel = nvgpu_vzalloc(g, f->num_channels * sizeof(*f->channel));
318 f->tsg = nvgpu_vzalloc(g, f->num_channels * sizeof(*f->tsg));
319 f->engine_info = nvgpu_kzalloc(g, f->max_engines *
320 sizeof(*f->engine_info));
321 f->active_engines_list = nvgpu_kzalloc(g, f->max_engines * sizeof(u32));
322
323 if (!(f->channel && f->tsg && f->engine_info && f->active_engines_list)) {
324 err = -ENOMEM;
325 goto clean_up;
326 }
327 memset(f->active_engines_list, 0xff, (f->max_engines * sizeof(u32)));
328
329 g->ops.fifo.init_engine_info(f);
330
331 init_runlist(g, f);
332
333 nvgpu_init_list_node(&f->free_chs);
334 nvgpu_mutex_init(&f->free_chs_mutex);
335
336 for (chid = 0; chid < f->num_channels; chid++) {
337 f->channel[chid].userd_iova =
338 nvgpu_mem_get_addr(g, &f->userd) +
339 chid * f->userd_entry_size;
340 f->channel[chid].userd_gpu_va =
341 f->userd.gpu_va + chid * f->userd_entry_size;
342
343 gk20a_init_channel_support(g, chid);
344 gk20a_init_tsg_support(g, chid);
345 }
346 nvgpu_mutex_init(&f->tsg_inuse_mutex);
347
348 err = nvgpu_channel_worker_init(g);
349 if (err)
350 goto clean_up;
351
352 f->deferred_reset_pending = false;
353 nvgpu_mutex_init(&f->deferred_reset_mutex);
354
355 f->channel_base = priv->constants.channel_base;
356
357 f->sw_ready = true;
358
359 gk20a_dbg_fn("done");
360 return 0;
361
362clean_up:
363 gk20a_dbg_fn("fail");
364 /* FIXME: unmap from bar1 */
365 nvgpu_dma_free(g, &f->userd);
366
367 memset(&f->userd, 0, sizeof(f->userd));
368
369 nvgpu_vfree(g, f->channel);
370 f->channel = NULL;
371 nvgpu_vfree(g, f->tsg);
372 f->tsg = NULL;
373 nvgpu_kfree(g, f->engine_info);
374 f->engine_info = NULL;
375 nvgpu_kfree(g, f->active_engines_list);
376 f->active_engines_list = NULL;
377
378 return err;
379}
380
381int vgpu_init_fifo_setup_hw(struct gk20a *g)
382{
383 gk20a_dbg_fn("");
384
385 /* test write, read through bar1 @ userd region before
386 * turning on the snooping */
387 {
388 struct fifo_gk20a *f = &g->fifo;
389 u32 v, v1 = 0x33, v2 = 0x55;
390
391 u32 bar1_vaddr = f->userd.gpu_va;
392 volatile u32 *cpu_vaddr = f->userd.cpu_va;
393
394 gk20a_dbg_info("test bar1 @ vaddr 0x%x",
395 bar1_vaddr);
396
397 v = gk20a_bar1_readl(g, bar1_vaddr);
398
399 *cpu_vaddr = v1;
400 nvgpu_mb();
401
402 if (v1 != gk20a_bar1_readl(g, bar1_vaddr)) {
403 nvgpu_err(g, "bar1 broken @ gk20a!");
404 return -EINVAL;
405 }
406
407 gk20a_bar1_writel(g, bar1_vaddr, v2);
408
409 if (v2 != gk20a_bar1_readl(g, bar1_vaddr)) {
410 nvgpu_err(g, "bar1 broken @ gk20a!");
411 return -EINVAL;
412 }
413
414 /* is it visible to the cpu? */
415 if (*cpu_vaddr != v2) {
416 nvgpu_err(g, "cpu didn't see bar1 write @ %p!",
417 cpu_vaddr);
418 }
419
420 /* put it back */
421 gk20a_bar1_writel(g, bar1_vaddr, v);
422 }
423
424 gk20a_dbg_fn("done");
425
426 return 0;
427}
428
429int vgpu_init_fifo_support(struct gk20a *g)
430{
431 u32 err;
432
433 gk20a_dbg_fn("");
434
435 err = vgpu_init_fifo_setup_sw(g);
436 if (err)
437 return err;
438
439 if (g->ops.fifo.init_fifo_setup_hw)
440 err = g->ops.fifo.init_fifo_setup_hw(g);
441 return err;
442}
443
444int vgpu_fifo_preempt_channel(struct gk20a *g, u32 chid)
445{
446 struct fifo_gk20a *f = &g->fifo;
447 struct channel_gk20a *ch = &f->channel[chid];
448 struct tegra_vgpu_cmd_msg msg;
449 struct tegra_vgpu_channel_config_params *p =
450 &msg.params.channel_config;
451 int err;
452
453 gk20a_dbg_fn("");
454
455 if (!nvgpu_atomic_read(&ch->bound))
456 return 0;
457
458 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_PREEMPT;
459 msg.handle = vgpu_get_handle(g);
460 p->handle = ch->virt_ctx;
461 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
462
463 if (err || msg.ret) {
464 nvgpu_err(g,
465 "preempt channel %d failed", chid);
466 err = -ENOMEM;
467 }
468
469 return err;
470}
471
472int vgpu_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
473{
474 struct tegra_vgpu_cmd_msg msg;
475 struct tegra_vgpu_tsg_preempt_params *p =
476 &msg.params.tsg_preempt;
477 int err;
478
479 gk20a_dbg_fn("");
480
481 msg.cmd = TEGRA_VGPU_CMD_TSG_PREEMPT;
482 msg.handle = vgpu_get_handle(g);
483 p->tsg_id = tsgid;
484 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
485 err = err ? err : msg.ret;
486
487 if (err) {
488 nvgpu_err(g,
489 "preempt tsg %u failed", tsgid);
490 }
491
492 return err;
493}
494
495static int vgpu_submit_runlist(struct gk20a *g, u64 handle, u8 runlist_id,
496 u16 *runlist, u32 num_entries)
497{
498 struct tegra_vgpu_cmd_msg msg;
499 struct tegra_vgpu_runlist_params *p;
500 int err;
501 void *oob_handle;
502 void *oob;
503 size_t size, oob_size;
504
505 oob_handle = tegra_gr_comm_oob_get_ptr(TEGRA_GR_COMM_CTX_CLIENT,
506 tegra_gr_comm_get_server_vmid(), TEGRA_VGPU_QUEUE_CMD,
507 &oob, &oob_size);
508 if (!oob_handle)
509 return -EINVAL;
510
511 size = sizeof(*runlist) * num_entries;
512 if (oob_size < size) {
513 err = -ENOMEM;
514 goto done;
515 }
516
517 msg.cmd = TEGRA_VGPU_CMD_SUBMIT_RUNLIST;
518 msg.handle = handle;
519 p = &msg.params.runlist;
520 p->runlist_id = runlist_id;
521 p->num_entries = num_entries;
522
523 memcpy(oob, runlist, size);
524 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
525
526 err = (err || msg.ret) ? -1 : 0;
527
528done:
529 tegra_gr_comm_oob_put_ptr(oob_handle);
530 return err;
531}
532
533static int vgpu_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
534 u32 chid, bool add,
535 bool wait_for_finish)
536{
537 struct fifo_gk20a *f = &g->fifo;
538 struct fifo_runlist_info_gk20a *runlist;
539 u16 *runlist_entry = NULL;
540 u32 count = 0;
541
542 gk20a_dbg_fn("");
543
544 runlist = &f->runlist_info[runlist_id];
545
546 /* valid channel, add/remove it from active list.
547 Otherwise, keep active list untouched for suspend/resume. */
548 if (chid != (u32)~0) {
549 if (add) {
550 if (test_and_set_bit(chid,
551 runlist->active_channels) == 1)
552 return 0;
553 } else {
554 if (test_and_clear_bit(chid,
555 runlist->active_channels) == 0)
556 return 0;
557 }
558 }
559
560 if (chid != (u32)~0 || /* add/remove a valid channel */
561 add /* resume to add all channels back */) {
562 u32 cid;
563
564 runlist_entry = runlist->mem[0].cpu_va;
565 for_each_set_bit(cid,
566 runlist->active_channels, f->num_channels) {
567 gk20a_dbg_info("add channel %d to runlist", cid);
568 runlist_entry[0] = cid;
569 runlist_entry++;
570 count++;
571 }
572 } else /* suspend to remove all channels */
573 count = 0;
574
575 return vgpu_submit_runlist(g, vgpu_get_handle(g), runlist_id,
576 runlist->mem[0].cpu_va, count);
577}
578
579/* add/remove a channel from runlist
580 special cases below: runlist->active_channels will NOT be changed.
581 (chid == ~0 && !add) means remove all active channels from runlist.
582 (chid == ~0 && add) means restore all active channels on runlist. */
583int vgpu_fifo_update_runlist(struct gk20a *g, u32 runlist_id,
584 u32 chid, bool add, bool wait_for_finish)
585{
586 struct fifo_runlist_info_gk20a *runlist = NULL;
587 struct fifo_gk20a *f = &g->fifo;
588 u32 ret = 0;
589
590 gk20a_dbg_fn("");
591
592 runlist = &f->runlist_info[runlist_id];
593
594 nvgpu_mutex_acquire(&runlist->mutex);
595
596 ret = vgpu_fifo_update_runlist_locked(g, runlist_id, chid, add,
597 wait_for_finish);
598
599 nvgpu_mutex_release(&runlist->mutex);
600 return ret;
601}
602
603int vgpu_fifo_wait_engine_idle(struct gk20a *g)
604{
605 gk20a_dbg_fn("");
606
607 return 0;
608}
609
610static int vgpu_fifo_tsg_set_runlist_interleave(struct gk20a *g,
611 u32 tsgid,
612 u32 runlist_id,
613 u32 new_level)
614{
615 struct tegra_vgpu_cmd_msg msg = {0};
616 struct tegra_vgpu_tsg_runlist_interleave_params *p =
617 &msg.params.tsg_interleave;
618 int err;
619
620 gk20a_dbg_fn("");
621
622 msg.cmd = TEGRA_VGPU_CMD_TSG_SET_RUNLIST_INTERLEAVE;
623 msg.handle = vgpu_get_handle(g);
624 p->tsg_id = tsgid;
625 p->level = new_level;
626 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
627 WARN_ON(err || msg.ret);
628 return err ? err : msg.ret;
629}
630
631int vgpu_fifo_set_runlist_interleave(struct gk20a *g,
632 u32 id,
633 bool is_tsg,
634 u32 runlist_id,
635 u32 new_level)
636{
637 struct tegra_vgpu_cmd_msg msg;
638 struct tegra_vgpu_channel_runlist_interleave_params *p =
639 &msg.params.channel_interleave;
640 struct channel_gk20a *ch;
641 int err;
642
643 gk20a_dbg_fn("");
644
645 if (is_tsg)
646 return vgpu_fifo_tsg_set_runlist_interleave(g, id,
647 runlist_id, new_level);
648
649 ch = &g->fifo.channel[id];
650 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_SET_RUNLIST_INTERLEAVE;
651 msg.handle = vgpu_get_handle(ch->g);
652 p->handle = ch->virt_ctx;
653 p->level = new_level;
654 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
655 WARN_ON(err || msg.ret);
656 return err ? err : msg.ret;
657}
658
659int vgpu_channel_set_timeslice(struct channel_gk20a *ch, u32 timeslice)
660{
661 struct tegra_vgpu_cmd_msg msg;
662 struct tegra_vgpu_channel_timeslice_params *p =
663 &msg.params.channel_timeslice;
664 int err;
665
666 gk20a_dbg_fn("");
667
668 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_SET_TIMESLICE;
669 msg.handle = vgpu_get_handle(ch->g);
670 p->handle = ch->virt_ctx;
671 p->timeslice_us = timeslice;
672 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
673 err = err ? err : msg.ret;
674 WARN_ON(err);
675 if (!err)
676 ch->timeslice_us = p->timeslice_us;
677 return err;
678}
679
680int vgpu_fifo_force_reset_ch(struct channel_gk20a *ch,
681 u32 err_code, bool verbose)
682{
683 struct tsg_gk20a *tsg = NULL;
684 struct channel_gk20a *ch_tsg = NULL;
685 struct gk20a *g = ch->g;
686 struct tegra_vgpu_cmd_msg msg = {0};
687 struct tegra_vgpu_channel_config_params *p =
688 &msg.params.channel_config;
689 int err;
690
691 gk20a_dbg_fn("");
692
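	/*
	 * Post the error notifier and mark every affected channel (the whole
	 * TSG if the channel is part of one) as timed out before asking the
	 * server to force-reset the channel.
	 */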
693 if (gk20a_is_channel_marked_as_tsg(ch)) {
694 tsg = &g->fifo.tsg[ch->tsgid];
695
696 nvgpu_rwsem_down_read(&tsg->ch_list_lock);
697
698 list_for_each_entry(ch_tsg, &tsg->ch_list, ch_entry) {
699 if (gk20a_channel_get(ch_tsg)) {
700 gk20a_set_error_notifier(ch_tsg, err_code);
701 ch_tsg->has_timedout = true;
702 gk20a_channel_put(ch_tsg);
703 }
704 }
705
706 nvgpu_rwsem_up_read(&tsg->ch_list_lock);
707 } else {
708 gk20a_set_error_notifier(ch, err_code);
709 ch->has_timedout = true;
710 }
711
712 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_FORCE_RESET;
713 msg.handle = vgpu_get_handle(ch->g);
714 p->handle = ch->virt_ctx;
715 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
716 WARN_ON(err || msg.ret);
717 if (!err)
718 gk20a_channel_abort(ch, false);
719 return err ? err : msg.ret;
720}
721
722static void vgpu_fifo_set_ctx_mmu_error_ch(struct gk20a *g,
723 struct channel_gk20a *ch)
724{
725 nvgpu_mutex_acquire(&ch->error_notifier_mutex);
726 if (ch->error_notifier_ref) {
727 if (ch->error_notifier->status == 0xffff) {
728			/* If an error code is already set, this mmu fault
729			 * was triggered as part of recovery from another
730			 * error condition.
731			 * Don't overwrite the error flag. */
732 } else {
733 gk20a_set_error_notifier_locked(ch,
734 NVGPU_CHANNEL_FIFO_ERROR_MMU_ERR_FLT);
735 }
736 }
737 nvgpu_mutex_release(&ch->error_notifier_mutex);
738
739 /* mark channel as faulted */
740 ch->has_timedout = true;
741 nvgpu_smp_wmb();
742 /* unblock pending waits */
743 nvgpu_cond_broadcast_interruptible(&ch->semaphore_wq);
744 nvgpu_cond_broadcast_interruptible(&ch->notifier_wq);
745}
746
747static void vgpu_fifo_set_ctx_mmu_error_ch_tsg(struct gk20a *g,
748 struct channel_gk20a *ch)
749{
750 struct tsg_gk20a *tsg = NULL;
751 struct channel_gk20a *ch_tsg = NULL;
752
753 if (gk20a_is_channel_marked_as_tsg(ch)) {
754 tsg = &g->fifo.tsg[ch->tsgid];
755
756 nvgpu_rwsem_down_read(&tsg->ch_list_lock);
757
758 list_for_each_entry(ch_tsg, &tsg->ch_list, ch_entry) {
759 if (gk20a_channel_get(ch_tsg)) {
760 vgpu_fifo_set_ctx_mmu_error_ch(g, ch_tsg);
761 gk20a_channel_put(ch_tsg);
762 }
763 }
764
765 nvgpu_rwsem_up_read(&tsg->ch_list_lock);
766 } else {
767 vgpu_fifo_set_ctx_mmu_error_ch(g, ch);
768 }
769}
770
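/*
 * Handle a fifo interrupt event reported for a vgpu channel: translate the
 * interrupt type into the matching error notifier and, for MMU faults,
 * abort the channel.
 */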
771int vgpu_fifo_isr(struct gk20a *g, struct tegra_vgpu_fifo_intr_info *info)
772{
773 struct fifo_gk20a *f = &g->fifo;
774 struct channel_gk20a *ch = gk20a_channel_get(&f->channel[info->chid]);
775
776 gk20a_dbg_fn("");
777 if (!ch)
778 return 0;
779
780 nvgpu_err(g, "fifo intr (%d) on ch %u",
781 info->type, info->chid);
782
783 trace_gk20a_channel_reset(ch->chid, ch->tsgid);
784
785 switch (info->type) {
786 case TEGRA_VGPU_FIFO_INTR_PBDMA:
787 gk20a_set_error_notifier(ch, NVGPU_CHANNEL_PBDMA_ERROR);
788 break;
789 case TEGRA_VGPU_FIFO_INTR_CTXSW_TIMEOUT:
790 gk20a_set_error_notifier(ch,
791 NVGPU_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT);
792 break;
793 case TEGRA_VGPU_FIFO_INTR_MMU_FAULT:
794 vgpu_fifo_set_ctx_mmu_error_ch_tsg(g, ch);
795 gk20a_channel_abort(ch, false);
796 break;
797 default:
798 WARN_ON(1);
799 break;
800 }
801
802 gk20a_channel_put(ch);
803 return 0;
804}
805
806int vgpu_fifo_nonstall_isr(struct gk20a *g,
807 struct tegra_vgpu_fifo_nonstall_intr_info *info)
808{
809 gk20a_dbg_fn("");
810
811 switch (info->type) {
812 case TEGRA_VGPU_FIFO_NONSTALL_INTR_CHANNEL:
813 gk20a_channel_semaphore_wakeup(g, false);
814 break;
815 default:
816 WARN_ON(1);
817 break;
818 }
819
820 return 0;
821}
822
823u32 vgpu_fifo_default_timeslice_us(struct gk20a *g)
824{
825 struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
826
827 return priv->constants.default_timeslice_us;
828}
diff --git a/drivers/gpu/nvgpu/vgpu/fifo_vgpu.h b/drivers/gpu/nvgpu/vgpu/fifo_vgpu.h
deleted file mode 100644
index 7633ad95..00000000
--- a/drivers/gpu/nvgpu/vgpu/fifo_vgpu.h
+++ /dev/null
@@ -1,65 +0,0 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef _FIFO_VGPU_H_
24#define _FIFO_VGPU_H_
25
26#include <nvgpu/types.h>
27
28struct gk20a;
29struct channel_gk20a;
30struct fifo_gk20a;
31struct tsg_gk20a;
32
33int vgpu_init_fifo_setup_hw(struct gk20a *g);
34void vgpu_channel_bind(struct channel_gk20a *ch);
35void vgpu_channel_unbind(struct channel_gk20a *ch);
36int vgpu_channel_alloc_inst(struct gk20a *g, struct channel_gk20a *ch);
37void vgpu_channel_free_inst(struct gk20a *g, struct channel_gk20a *ch);
38void vgpu_channel_enable(struct channel_gk20a *ch);
39void vgpu_channel_disable(struct channel_gk20a *ch);
40int vgpu_channel_setup_ramfc(struct channel_gk20a *ch, u64 gpfifo_base,
41 u32 gpfifo_entries,
42 unsigned long acquire_timeout, u32 flags);
43int vgpu_fifo_init_engine_info(struct fifo_gk20a *f);
44int vgpu_fifo_preempt_channel(struct gk20a *g, u32 chid);
45int vgpu_fifo_preempt_tsg(struct gk20a *g, u32 tsgid);
46int vgpu_fifo_update_runlist(struct gk20a *g, u32 runlist_id,
47 u32 chid, bool add, bool wait_for_finish);
48int vgpu_fifo_wait_engine_idle(struct gk20a *g);
49int vgpu_fifo_set_runlist_interleave(struct gk20a *g,
50 u32 id,
51 bool is_tsg,
52 u32 runlist_id,
53 u32 new_level);
54int vgpu_channel_set_timeslice(struct channel_gk20a *ch, u32 timeslice);
55int vgpu_fifo_force_reset_ch(struct channel_gk20a *ch,
56 u32 err_code, bool verbose);
57u32 vgpu_fifo_default_timeslice_us(struct gk20a *g);
58int vgpu_tsg_open(struct tsg_gk20a *tsg);
59int vgpu_tsg_bind_channel(struct tsg_gk20a *tsg,
60 struct channel_gk20a *ch);
61int vgpu_tsg_unbind_channel(struct channel_gk20a *ch);
62int vgpu_tsg_set_timeslice(struct tsg_gk20a *tsg, u32 timeslice);
63int vgpu_enable_tsg(struct tsg_gk20a *tsg);
64
65#endif
diff --git a/drivers/gpu/nvgpu/vgpu/gm20b/vgpu_gr_gm20b.c b/drivers/gpu/nvgpu/vgpu/gm20b/vgpu_gr_gm20b.c
deleted file mode 100644
index 0e440241..00000000
--- a/drivers/gpu/nvgpu/vgpu/gm20b/vgpu_gr_gm20b.c
+++ /dev/null
@@ -1,69 +0,0 @@
1/*
2 * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#include <nvgpu/enabled.h>
24
25#include "gk20a/gk20a.h"
26#include "gk20a/css_gr_gk20a.h"
27#include "vgpu/css_vgpu.h"
28#include "vgpu_gr_gm20b.h"
29
30void vgpu_gr_gm20b_init_cyclestats(struct gk20a *g)
31{
32#if defined(CONFIG_GK20A_CYCLE_STATS)
33 bool snapshots_supported = true;
34
35 /* cyclestats not supported on vgpu */
36 __nvgpu_set_enabled(g, NVGPU_SUPPORT_CYCLE_STATS, false);
37
38 g->gr.max_css_buffer_size = vgpu_css_get_buffer_size(g);
39
40 /* snapshots not supported if the buffer size is 0 */
41 if (g->gr.max_css_buffer_size == 0)
42 snapshots_supported = false;
43
44 __nvgpu_set_enabled(g, NVGPU_SUPPORT_CYCLE_STATS_SNAPSHOT,
45 snapshots_supported);
46#endif
47}
48
49int vgpu_gm20b_init_fs_state(struct gk20a *g)
50{
51 struct gr_gk20a *gr = &g->gr;
52 u32 tpc_index, gpc_index;
53 u32 sm_id = 0;
54
55 gk20a_dbg_fn("");
56
57 for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) {
58 for (tpc_index = 0; tpc_index < gr->gpc_tpc_count[gpc_index];
59 tpc_index++) {
60 g->gr.sm_to_cluster[sm_id].tpc_index = tpc_index;
61 g->gr.sm_to_cluster[sm_id].gpc_index = gpc_index;
62
63 sm_id++;
64 }
65 }
66
67 gr->no_of_sm = sm_id;
68 return 0;
69}
diff --git a/drivers/gpu/nvgpu/vgpu/gm20b/vgpu_gr_gm20b.h b/drivers/gpu/nvgpu/vgpu/gm20b/vgpu_gr_gm20b.h
deleted file mode 100644
index a0a89579..00000000
--- a/drivers/gpu/nvgpu/vgpu/gm20b/vgpu_gr_gm20b.h
+++ /dev/null
@@ -1,31 +0,0 @@
1/*
2 * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef __VGPU_GR_GM20B_H__
24#define __VGPU_GR_GM20B_H__
25
26#include "gk20a/gk20a.h"
27
28void vgpu_gr_gm20b_init_cyclestats(struct gk20a *g);
29int vgpu_gm20b_init_fs_state(struct gk20a *g);
30
31#endif
diff --git a/drivers/gpu/nvgpu/vgpu/gm20b/vgpu_hal_gm20b.c b/drivers/gpu/nvgpu/vgpu/gm20b/vgpu_hal_gm20b.c
deleted file mode 100644
index a3eb59ac..00000000
--- a/drivers/gpu/nvgpu/vgpu/gm20b/vgpu_hal_gm20b.c
+++ /dev/null
@@ -1,588 +0,0 @@
1/*
2 * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#include "gm20b/hal_gm20b.h"
24#include "vgpu/vgpu.h"
25#include "vgpu/fifo_vgpu.h"
26#include "vgpu/gr_vgpu.h"
27#include "vgpu/ltc_vgpu.h"
28#include "vgpu/mm_vgpu.h"
29#include "vgpu/dbg_vgpu.h"
30#include "vgpu/fecs_trace_vgpu.h"
31#include "vgpu/css_vgpu.h"
32#include "vgpu_gr_gm20b.h"
33
34#include "gk20a/bus_gk20a.h"
35#include "gk20a/flcn_gk20a.h"
36#include "gk20a/mc_gk20a.h"
37#include "gk20a/fb_gk20a.h"
38
39#include "gm20b/gr_gm20b.h"
40#include "gm20b/fifo_gm20b.h"
41#include "gm20b/acr_gm20b.h"
42#include "gm20b/pmu_gm20b.h"
43#include "gm20b/fb_gm20b.h"
44#include "gm20b/bus_gm20b.h"
45#include "gm20b/regops_gm20b.h"
46#include "gm20b/clk_gm20b.h"
47#include "gm20b/therm_gm20b.h"
48#include "gm20b/mm_gm20b.h"
49#include "gm20b/gr_ctx_gm20b.h"
50#include "gm20b/gm20b_gating_reglist.h"
51#include "gm20b/ltc_gm20b.h"
52
53#include <nvgpu/enabled.h>
54
55#include <nvgpu/hw/gm20b/hw_fuse_gm20b.h>
56#include <nvgpu/hw/gm20b/hw_pwr_gm20b.h>
57#include <nvgpu/hw/gm20b/hw_fifo_gm20b.h>
58#include <nvgpu/hw/gm20b/hw_ram_gm20b.h>
59
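/*
 * gm20b HAL for the virtualized case: ops that would touch hardware are
 * replaced with vgpu_* wrappers that forward the request to the vgpu
 * server, while software-only helpers are reused from the native
 * gk20a/gm20b implementations.
 */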
60static const struct gpu_ops vgpu_gm20b_ops = {
61 .ltc = {
62 .determine_L2_size_bytes = vgpu_determine_L2_size_bytes,
63 .set_zbc_color_entry = gm20b_ltc_set_zbc_color_entry,
64 .set_zbc_depth_entry = gm20b_ltc_set_zbc_depth_entry,
65 .init_cbc = gm20b_ltc_init_cbc,
66 .init_fs_state = vgpu_ltc_init_fs_state,
67 .init_comptags = vgpu_ltc_init_comptags,
68 .cbc_ctrl = NULL,
69 .isr = gm20b_ltc_isr,
70 .cbc_fix_config = gm20b_ltc_cbc_fix_config,
71 .flush = gm20b_flush_ltc,
72 .set_enabled = gm20b_ltc_set_enabled,
73 },
74 .ce2 = {
75 .isr_stall = gk20a_ce2_isr,
76 .isr_nonstall = gk20a_ce2_nonstall_isr,
77 .get_num_pce = vgpu_ce_get_num_pce,
78 },
79 .gr = {
80 .get_patch_slots = gr_gk20a_get_patch_slots,
81 .init_gpc_mmu = gr_gm20b_init_gpc_mmu,
82 .bundle_cb_defaults = gr_gm20b_bundle_cb_defaults,
83 .cb_size_default = gr_gm20b_cb_size_default,
84 .calc_global_ctx_buffer_size =
85 gr_gm20b_calc_global_ctx_buffer_size,
86 .commit_global_attrib_cb = gr_gm20b_commit_global_attrib_cb,
87 .commit_global_bundle_cb = gr_gm20b_commit_global_bundle_cb,
88 .commit_global_cb_manager = gr_gm20b_commit_global_cb_manager,
89 .commit_global_pagepool = gr_gm20b_commit_global_pagepool,
90 .handle_sw_method = gr_gm20b_handle_sw_method,
91 .set_alpha_circular_buffer_size =
92 gr_gm20b_set_alpha_circular_buffer_size,
93 .set_circular_buffer_size = gr_gm20b_set_circular_buffer_size,
94 .enable_hww_exceptions = gr_gk20a_enable_hww_exceptions,
95 .is_valid_class = gr_gm20b_is_valid_class,
96 .is_valid_gfx_class = gr_gm20b_is_valid_gfx_class,
97 .is_valid_compute_class = gr_gm20b_is_valid_compute_class,
98 .get_sm_dsm_perf_regs = gr_gm20b_get_sm_dsm_perf_regs,
99 .get_sm_dsm_perf_ctrl_regs = gr_gm20b_get_sm_dsm_perf_ctrl_regs,
100 .init_fs_state = vgpu_gm20b_init_fs_state,
101 .set_hww_esr_report_mask = gr_gm20b_set_hww_esr_report_mask,
102 .falcon_load_ucode = gr_gm20b_load_ctxsw_ucode_segments,
103 .load_ctxsw_ucode = gr_gk20a_load_ctxsw_ucode,
104 .set_gpc_tpc_mask = gr_gm20b_set_gpc_tpc_mask,
105 .get_gpc_tpc_mask = vgpu_gr_get_gpc_tpc_mask,
106 .free_channel_ctx = vgpu_gr_free_channel_ctx,
107 .alloc_obj_ctx = vgpu_gr_alloc_obj_ctx,
108 .bind_ctxsw_zcull = vgpu_gr_bind_ctxsw_zcull,
109 .get_zcull_info = vgpu_gr_get_zcull_info,
110 .is_tpc_addr = gr_gm20b_is_tpc_addr,
111 .get_tpc_num = gr_gm20b_get_tpc_num,
112 .detect_sm_arch = vgpu_gr_detect_sm_arch,
113 .add_zbc_color = gr_gk20a_add_zbc_color,
114 .add_zbc_depth = gr_gk20a_add_zbc_depth,
115 .zbc_set_table = vgpu_gr_add_zbc,
116 .zbc_query_table = vgpu_gr_query_zbc,
117 .pmu_save_zbc = gk20a_pmu_save_zbc,
118 .add_zbc = gr_gk20a_add_zbc,
119 .pagepool_default_size = gr_gm20b_pagepool_default_size,
120 .init_ctx_state = vgpu_gr_init_ctx_state,
121 .alloc_gr_ctx = vgpu_gr_alloc_gr_ctx,
122 .free_gr_ctx = vgpu_gr_free_gr_ctx,
123 .update_ctxsw_preemption_mode =
124 gr_gm20b_update_ctxsw_preemption_mode,
125 .dump_gr_regs = NULL,
126 .update_pc_sampling = gr_gm20b_update_pc_sampling,
127 .get_fbp_en_mask = vgpu_gr_get_fbp_en_mask,
128 .get_max_ltc_per_fbp = vgpu_gr_get_max_ltc_per_fbp,
129 .get_max_lts_per_ltc = vgpu_gr_get_max_lts_per_ltc,
130 .get_rop_l2_en_mask = vgpu_gr_rop_l2_en_mask,
131 .get_max_fbps_count = vgpu_gr_get_max_fbps_count,
132 .init_sm_dsm_reg_info = gr_gm20b_init_sm_dsm_reg_info,
133 .wait_empty = gr_gk20a_wait_idle,
134 .init_cyclestats = vgpu_gr_gm20b_init_cyclestats,
135 .set_sm_debug_mode = vgpu_gr_set_sm_debug_mode,
136 .enable_cde_in_fecs = gr_gm20b_enable_cde_in_fecs,
137 .bpt_reg_info = gr_gm20b_bpt_reg_info,
138 .get_access_map = gr_gm20b_get_access_map,
139 .handle_fecs_error = gk20a_gr_handle_fecs_error,
140 .handle_sm_exception = gr_gk20a_handle_sm_exception,
141 .handle_tex_exception = gr_gk20a_handle_tex_exception,
142 .enable_gpc_exceptions = gk20a_gr_enable_gpc_exceptions,
143 .enable_exceptions = gk20a_gr_enable_exceptions,
144 .get_lrf_tex_ltc_dram_override = NULL,
145 .update_smpc_ctxsw_mode = vgpu_gr_update_smpc_ctxsw_mode,
146 .update_hwpm_ctxsw_mode = vgpu_gr_update_hwpm_ctxsw_mode,
147 .record_sm_error_state = gm20b_gr_record_sm_error_state,
148 .update_sm_error_state = gm20b_gr_update_sm_error_state,
149 .clear_sm_error_state = vgpu_gr_clear_sm_error_state,
150 .suspend_contexts = vgpu_gr_suspend_contexts,
151 .resume_contexts = vgpu_gr_resume_contexts,
152 .get_preemption_mode_flags = gr_gm20b_get_preemption_mode_flags,
153 .init_sm_id_table = gr_gk20a_init_sm_id_table,
154 .load_smid_config = gr_gm20b_load_smid_config,
155 .program_sm_id_numbering = gr_gm20b_program_sm_id_numbering,
156 .is_ltcs_ltss_addr = gr_gm20b_is_ltcs_ltss_addr,
157 .is_ltcn_ltss_addr = gr_gm20b_is_ltcn_ltss_addr,
158 .split_lts_broadcast_addr = gr_gm20b_split_lts_broadcast_addr,
159 .split_ltc_broadcast_addr = gr_gm20b_split_ltc_broadcast_addr,
160 .setup_rop_mapping = gr_gk20a_setup_rop_mapping,
161 .program_zcull_mapping = gr_gk20a_program_zcull_mapping,
162 .commit_global_timeslice = gr_gk20a_commit_global_timeslice,
163 .commit_inst = vgpu_gr_commit_inst,
164 .write_zcull_ptr = gr_gk20a_write_zcull_ptr,
165 .write_pm_ptr = gr_gk20a_write_pm_ptr,
166 .init_elcg_mode = gr_gk20a_init_elcg_mode,
167 .load_tpc_mask = gr_gm20b_load_tpc_mask,
168 .inval_icache = gr_gk20a_inval_icache,
169 .trigger_suspend = gr_gk20a_trigger_suspend,
170 .wait_for_pause = gr_gk20a_wait_for_pause,
171 .resume_from_pause = gr_gk20a_resume_from_pause,
172 .clear_sm_errors = gr_gk20a_clear_sm_errors,
173 .tpc_enabled_exceptions = gr_gk20a_tpc_enabled_exceptions,
174 .get_esr_sm_sel = gk20a_gr_get_esr_sm_sel,
175 .sm_debugger_attached = gk20a_gr_sm_debugger_attached,
176 .suspend_single_sm = gk20a_gr_suspend_single_sm,
177 .suspend_all_sms = gk20a_gr_suspend_all_sms,
178 .resume_single_sm = gk20a_gr_resume_single_sm,
179 .resume_all_sms = gk20a_gr_resume_all_sms,
180 .get_sm_hww_warp_esr = gk20a_gr_get_sm_hww_warp_esr,
181 .get_sm_hww_global_esr = gk20a_gr_get_sm_hww_global_esr,
182 .get_sm_no_lock_down_hww_global_esr_mask =
183 gk20a_gr_get_sm_no_lock_down_hww_global_esr_mask,
184 .lock_down_sm = gk20a_gr_lock_down_sm,
185 .wait_for_sm_lock_down = gk20a_gr_wait_for_sm_lock_down,
186 .clear_sm_hww = gm20b_gr_clear_sm_hww,
187 .init_ovr_sm_dsm_perf = gk20a_gr_init_ovr_sm_dsm_perf,
188 .get_ovr_perf_regs = gk20a_gr_get_ovr_perf_regs,
189 .disable_rd_coalesce = gm20a_gr_disable_rd_coalesce,
190 .init_ctxsw_hdr_data = gk20a_gr_init_ctxsw_hdr_data,
191 .set_boosted_ctx = NULL,
192 .update_boosted_ctx = NULL,
193 },
194 .fb = {
195 .reset = fb_gk20a_reset,
196 .init_hw = gk20a_fb_init_hw,
197 .init_fs_state = fb_gm20b_init_fs_state,
198 .set_mmu_page_size = gm20b_fb_set_mmu_page_size,
199 .set_use_full_comp_tag_line =
200 gm20b_fb_set_use_full_comp_tag_line,
201 .compression_page_size = gm20b_fb_compression_page_size,
202 .compressible_page_size = gm20b_fb_compressible_page_size,
203 .vpr_info_fetch = gm20b_fb_vpr_info_fetch,
204 .dump_vpr_wpr_info = gm20b_fb_dump_vpr_wpr_info,
205 .read_wpr_info = gm20b_fb_read_wpr_info,
206 .is_debug_mode_enabled = NULL,
207 .set_debug_mode = vgpu_mm_mmu_set_debug_mode,
208 .tlb_invalidate = vgpu_mm_tlb_invalidate,
209 },
210 .clock_gating = {
211 .slcg_bus_load_gating_prod =
212 gm20b_slcg_bus_load_gating_prod,
213 .slcg_ce2_load_gating_prod =
214 gm20b_slcg_ce2_load_gating_prod,
215 .slcg_chiplet_load_gating_prod =
216 gm20b_slcg_chiplet_load_gating_prod,
217 .slcg_ctxsw_firmware_load_gating_prod =
218 gm20b_slcg_ctxsw_firmware_load_gating_prod,
219 .slcg_fb_load_gating_prod =
220 gm20b_slcg_fb_load_gating_prod,
221 .slcg_fifo_load_gating_prod =
222 gm20b_slcg_fifo_load_gating_prod,
223 .slcg_gr_load_gating_prod =
224 gr_gm20b_slcg_gr_load_gating_prod,
225 .slcg_ltc_load_gating_prod =
226 ltc_gm20b_slcg_ltc_load_gating_prod,
227 .slcg_perf_load_gating_prod =
228 gm20b_slcg_perf_load_gating_prod,
229 .slcg_priring_load_gating_prod =
230 gm20b_slcg_priring_load_gating_prod,
231 .slcg_pmu_load_gating_prod =
232 gm20b_slcg_pmu_load_gating_prod,
233 .slcg_therm_load_gating_prod =
234 gm20b_slcg_therm_load_gating_prod,
235 .slcg_xbar_load_gating_prod =
236 gm20b_slcg_xbar_load_gating_prod,
237 .blcg_bus_load_gating_prod =
238 gm20b_blcg_bus_load_gating_prod,
239 .blcg_ctxsw_firmware_load_gating_prod =
240 gm20b_blcg_ctxsw_firmware_load_gating_prod,
241 .blcg_fb_load_gating_prod =
242 gm20b_blcg_fb_load_gating_prod,
243 .blcg_fifo_load_gating_prod =
244 gm20b_blcg_fifo_load_gating_prod,
245 .blcg_gr_load_gating_prod =
246 gm20b_blcg_gr_load_gating_prod,
247 .blcg_ltc_load_gating_prod =
248 gm20b_blcg_ltc_load_gating_prod,
249 .blcg_pwr_csb_load_gating_prod =
250 gm20b_blcg_pwr_csb_load_gating_prod,
251 .blcg_xbar_load_gating_prod =
252 gm20b_blcg_xbar_load_gating_prod,
253 .blcg_pmu_load_gating_prod =
254 gm20b_blcg_pmu_load_gating_prod,
255 .pg_gr_load_gating_prod =
256 gr_gm20b_pg_gr_load_gating_prod,
257 },
258 .fifo = {
259 .init_fifo_setup_hw = vgpu_init_fifo_setup_hw,
260 .bind_channel = vgpu_channel_bind,
261 .unbind_channel = vgpu_channel_unbind,
262 .disable_channel = vgpu_channel_disable,
263 .enable_channel = vgpu_channel_enable,
264 .alloc_inst = vgpu_channel_alloc_inst,
265 .free_inst = vgpu_channel_free_inst,
266 .setup_ramfc = vgpu_channel_setup_ramfc,
267 .channel_set_timeslice = vgpu_channel_set_timeslice,
268 .default_timeslice_us = vgpu_fifo_default_timeslice_us,
269 .setup_userd = gk20a_fifo_setup_userd,
270 .userd_gp_get = gk20a_fifo_userd_gp_get,
271 .userd_gp_put = gk20a_fifo_userd_gp_put,
272 .userd_pb_get = gk20a_fifo_userd_pb_get,
273 .pbdma_acquire_val = gk20a_fifo_pbdma_acquire_val,
274 .preempt_channel = vgpu_fifo_preempt_channel,
275 .preempt_tsg = vgpu_fifo_preempt_tsg,
276 .enable_tsg = vgpu_enable_tsg,
277 .disable_tsg = gk20a_disable_tsg,
278 .tsg_verify_channel_status = NULL,
279 .tsg_verify_status_ctx_reload = NULL,
280 .update_runlist = vgpu_fifo_update_runlist,
281 .trigger_mmu_fault = gm20b_fifo_trigger_mmu_fault,
282 .get_mmu_fault_info = gk20a_fifo_get_mmu_fault_info,
283 .wait_engine_idle = vgpu_fifo_wait_engine_idle,
284 .get_num_fifos = gm20b_fifo_get_num_fifos,
285 .get_pbdma_signature = gk20a_fifo_get_pbdma_signature,
286 .set_runlist_interleave = vgpu_fifo_set_runlist_interleave,
287 .tsg_set_timeslice = vgpu_tsg_set_timeslice,
288 .tsg_open = vgpu_tsg_open,
289 .force_reset_ch = vgpu_fifo_force_reset_ch,
290 .engine_enum_from_type = gk20a_fifo_engine_enum_from_type,
291 .device_info_data_parse = gm20b_device_info_data_parse,
292 .eng_runlist_base_size = fifo_eng_runlist_base__size_1_v,
293 .init_engine_info = vgpu_fifo_init_engine_info,
294 .runlist_entry_size = ram_rl_entry_size_v,
295 .get_tsg_runlist_entry = gk20a_get_tsg_runlist_entry,
296 .get_ch_runlist_entry = gk20a_get_ch_runlist_entry,
297 .is_fault_engine_subid_gpc = gk20a_is_fault_engine_subid_gpc,
298 .dump_pbdma_status = gk20a_dump_pbdma_status,
299 .dump_eng_status = gk20a_dump_eng_status,
300 .dump_channel_status_ramfc = gk20a_dump_channel_status_ramfc,
301 .intr_0_error_mask = gk20a_fifo_intr_0_error_mask,
302 .is_preempt_pending = gk20a_fifo_is_preempt_pending,
303 .init_pbdma_intr_descs = gm20b_fifo_init_pbdma_intr_descs,
304 .reset_enable_hw = gk20a_init_fifo_reset_enable_hw,
305 .teardown_ch_tsg = gk20a_fifo_teardown_ch_tsg,
306 .handle_sched_error = gk20a_fifo_handle_sched_error,
307 .handle_pbdma_intr_0 = gk20a_fifo_handle_pbdma_intr_0,
308 .handle_pbdma_intr_1 = gk20a_fifo_handle_pbdma_intr_1,
309 .tsg_bind_channel = vgpu_tsg_bind_channel,
310 .tsg_unbind_channel = vgpu_tsg_unbind_channel,
311#ifdef CONFIG_TEGRA_GK20A_NVHOST
312 .alloc_syncpt_buf = gk20a_fifo_alloc_syncpt_buf,
313 .free_syncpt_buf = gk20a_fifo_free_syncpt_buf,
314 .add_syncpt_wait_cmd = gk20a_fifo_add_syncpt_wait_cmd,
315 .get_syncpt_wait_cmd_size = gk20a_fifo_get_syncpt_wait_cmd_size,
316 .add_syncpt_incr_cmd = gk20a_fifo_add_syncpt_incr_cmd,
317 .get_syncpt_incr_cmd_size = gk20a_fifo_get_syncpt_incr_cmd_size,
318#endif
319 },
320 .gr_ctx = {
321 .get_netlist_name = gr_gm20b_get_netlist_name,
322 .is_fw_defined = gr_gm20b_is_firmware_defined,
323 },
324 .mm = {
325 .support_sparse = gm20b_mm_support_sparse,
326 .gmmu_map = vgpu_locked_gmmu_map,
327 .gmmu_unmap = vgpu_locked_gmmu_unmap,
328 .vm_bind_channel = vgpu_vm_bind_channel,
329 .fb_flush = vgpu_mm_fb_flush,
330 .l2_invalidate = vgpu_mm_l2_invalidate,
331 .l2_flush = vgpu_mm_l2_flush,
332 .cbc_clean = gk20a_mm_cbc_clean,
333 .set_big_page_size = gm20b_mm_set_big_page_size,
334 .get_big_page_sizes = gm20b_mm_get_big_page_sizes,
335 .get_default_big_page_size = gm20b_mm_get_default_big_page_size,
336 .gpu_phys_addr = gm20b_gpu_phys_addr,
337 .get_iommu_bit = gk20a_mm_get_iommu_bit,
338 .get_mmu_levels = gk20a_mm_get_mmu_levels,
339 .init_pdb = gk20a_mm_init_pdb,
340 .init_mm_setup_hw = NULL,
341 .is_bar1_supported = gm20b_mm_is_bar1_supported,
342 .init_inst_block = gk20a_init_inst_block,
343 .mmu_fault_pending = gk20a_fifo_mmu_fault_pending,
344 .get_kind_invalid = gm20b_get_kind_invalid,
345 .get_kind_pitch = gm20b_get_kind_pitch,
346 },
347 .therm = {
348 .init_therm_setup_hw = gm20b_init_therm_setup_hw,
349 .elcg_init_idle_filters = gk20a_elcg_init_idle_filters,
350 },
351 .pmu = {
352 .pmu_setup_elpg = gm20b_pmu_setup_elpg,
353 .pmu_get_queue_head = pwr_pmu_queue_head_r,
354 .pmu_get_queue_head_size = pwr_pmu_queue_head__size_1_v,
355 .pmu_get_queue_tail = pwr_pmu_queue_tail_r,
356 .pmu_get_queue_tail_size = pwr_pmu_queue_tail__size_1_v,
357 .pmu_queue_head = gk20a_pmu_queue_head,
358 .pmu_queue_tail = gk20a_pmu_queue_tail,
359 .pmu_msgq_tail = gk20a_pmu_msgq_tail,
360 .pmu_mutex_size = pwr_pmu_mutex__size_1_v,
361 .pmu_mutex_acquire = gk20a_pmu_mutex_acquire,
362 .pmu_mutex_release = gk20a_pmu_mutex_release,
363 .write_dmatrfbase = gm20b_write_dmatrfbase,
364 .pmu_elpg_statistics = gk20a_pmu_elpg_statistics,
365 .pmu_pg_init_param = NULL,
366 .pmu_pg_supported_engines_list = gk20a_pmu_pg_engines_list,
367 .pmu_pg_engines_feature_list = gk20a_pmu_pg_feature_list,
368 .pmu_is_lpwr_feature_supported = NULL,
369 .pmu_lpwr_enable_pg = NULL,
370 .pmu_lpwr_disable_pg = NULL,
371 .pmu_pg_param_post_init = NULL,
372 .dump_secure_fuses = pmu_dump_security_fuses_gm20b,
373 .reset_engine = gk20a_pmu_engine_reset,
374 .is_engine_in_reset = gk20a_pmu_is_engine_in_reset,
375 },
376 .clk = {
377 .init_clk_support = gm20b_init_clk_support,
378 .suspend_clk_support = gm20b_suspend_clk_support,
379#ifdef CONFIG_DEBUG_FS
380 .init_debugfs = gm20b_clk_init_debugfs,
381#endif
382 .get_voltage = gm20b_clk_get_voltage,
383 .get_gpcclk_clock_counter = gm20b_clk_get_gpcclk_clock_counter,
384 .pll_reg_write = gm20b_clk_pll_reg_write,
385 .get_pll_debug_data = gm20b_clk_get_pll_debug_data,
386 },
387 .regops = {
388 .get_global_whitelist_ranges =
389 gm20b_get_global_whitelist_ranges,
390 .get_global_whitelist_ranges_count =
391 gm20b_get_global_whitelist_ranges_count,
392 .get_context_whitelist_ranges =
393 gm20b_get_context_whitelist_ranges,
394 .get_context_whitelist_ranges_count =
395 gm20b_get_context_whitelist_ranges_count,
396 .get_runcontrol_whitelist = gm20b_get_runcontrol_whitelist,
397 .get_runcontrol_whitelist_count =
398 gm20b_get_runcontrol_whitelist_count,
399 .get_runcontrol_whitelist_ranges =
400 gm20b_get_runcontrol_whitelist_ranges,
401 .get_runcontrol_whitelist_ranges_count =
402 gm20b_get_runcontrol_whitelist_ranges_count,
403 .get_qctl_whitelist = gm20b_get_qctl_whitelist,
404 .get_qctl_whitelist_count = gm20b_get_qctl_whitelist_count,
405 .get_qctl_whitelist_ranges = gm20b_get_qctl_whitelist_ranges,
406 .get_qctl_whitelist_ranges_count =
407 gm20b_get_qctl_whitelist_ranges_count,
408 .apply_smpc_war = gm20b_apply_smpc_war,
409 },
410 .mc = {
411 .intr_enable = mc_gk20a_intr_enable,
412 .intr_unit_config = mc_gk20a_intr_unit_config,
413 .isr_stall = mc_gk20a_isr_stall,
414 .intr_stall = mc_gk20a_intr_stall,
415 .intr_stall_pause = mc_gk20a_intr_stall_pause,
416 .intr_stall_resume = mc_gk20a_intr_stall_resume,
417 .intr_nonstall = mc_gk20a_intr_nonstall,
418 .intr_nonstall_pause = mc_gk20a_intr_nonstall_pause,
419 .intr_nonstall_resume = mc_gk20a_intr_nonstall_resume,
420 .enable = gk20a_mc_enable,
421 .disable = gk20a_mc_disable,
422 .reset = gk20a_mc_reset,
423 .boot_0 = gk20a_mc_boot_0,
424 .is_intr1_pending = mc_gk20a_is_intr1_pending,
425 },
426 .debug = {
427 .show_dump = NULL,
428 },
429 .dbg_session_ops = {
430 .exec_reg_ops = vgpu_exec_regops,
431 .dbg_set_powergate = vgpu_dbg_set_powergate,
432 .check_and_set_global_reservation =
433 vgpu_check_and_set_global_reservation,
434 .check_and_set_context_reservation =
435 vgpu_check_and_set_context_reservation,
436 .release_profiler_reservation =
437 vgpu_release_profiler_reservation,
438 .perfbuffer_enable = vgpu_perfbuffer_enable,
439 .perfbuffer_disable = vgpu_perfbuffer_disable,
440 },
441 .bus = {
442 .init_hw = gk20a_bus_init_hw,
443 .isr = gk20a_bus_isr,
444 .read_ptimer = vgpu_read_ptimer,
445 .get_timestamps_zipper = vgpu_get_timestamps_zipper,
446 .bar1_bind = gm20b_bus_bar1_bind,
447 },
448#if defined(CONFIG_GK20A_CYCLE_STATS)
449 .css = {
450 .enable_snapshot = vgpu_css_enable_snapshot_buffer,
451 .disable_snapshot = vgpu_css_release_snapshot_buffer,
452 .check_data_available = vgpu_css_flush_snapshots,
453 .detach_snapshot = vgpu_css_detach,
454 .set_handled_snapshots = NULL,
455 .allocate_perfmon_ids = NULL,
456 .release_perfmon_ids = NULL,
457 },
458#endif
459 .falcon = {
460 .falcon_hal_sw_init = gk20a_falcon_hal_sw_init,
461 },
462 .priv_ring = {
463 .isr = gk20a_priv_ring_isr,
464 },
465 .chip_init_gpu_characteristics = vgpu_init_gpu_characteristics,
466 .get_litter_value = gm20b_get_litter_value,
467};
468
469int vgpu_gm20b_init_hal(struct gk20a *g)
470{
471 struct gpu_ops *gops = &g->ops;
472 u32 val;
473
474 gops->ltc = vgpu_gm20b_ops.ltc;
475 gops->ce2 = vgpu_gm20b_ops.ce2;
476 gops->gr = vgpu_gm20b_ops.gr;
477 gops->fb = vgpu_gm20b_ops.fb;
478 gops->clock_gating = vgpu_gm20b_ops.clock_gating;
479 gops->fifo = vgpu_gm20b_ops.fifo;
480 gops->gr_ctx = vgpu_gm20b_ops.gr_ctx;
481 gops->mm = vgpu_gm20b_ops.mm;
482 gops->therm = vgpu_gm20b_ops.therm;
483 gops->pmu = vgpu_gm20b_ops.pmu;
484 /*
485 * clk must be assigned member by member
486 * since some clk ops are assigned during probe prior to HAL init
487 */
488 gops->clk.init_clk_support = vgpu_gm20b_ops.clk.init_clk_support;
489 gops->clk.suspend_clk_support = vgpu_gm20b_ops.clk.suspend_clk_support;
490 gops->clk.get_voltage = vgpu_gm20b_ops.clk.get_voltage;
491 gops->clk.get_gpcclk_clock_counter =
492 vgpu_gm20b_ops.clk.get_gpcclk_clock_counter;
493 gops->clk.pll_reg_write = vgpu_gm20b_ops.clk.pll_reg_write;
494 gops->clk.get_pll_debug_data = vgpu_gm20b_ops.clk.get_pll_debug_data;
495
496 gops->regops = vgpu_gm20b_ops.regops;
497 gops->mc = vgpu_gm20b_ops.mc;
498 gops->dbg_session_ops = vgpu_gm20b_ops.dbg_session_ops;
499 gops->debug = vgpu_gm20b_ops.debug;
500 gops->bus = vgpu_gm20b_ops.bus;
501#if defined(CONFIG_GK20A_CYCLE_STATS)
502 gops->css = vgpu_gm20b_ops.css;
503#endif
504 gops->falcon = vgpu_gm20b_ops.falcon;
505
506 gops->priv_ring = vgpu_gm20b_ops.priv_ring;
507
508 /* Lone functions */
509 gops->chip_init_gpu_characteristics =
510 vgpu_gm20b_ops.chip_init_gpu_characteristics;
511 gops->get_litter_value = vgpu_gm20b_ops.get_litter_value;
512
513 __nvgpu_set_enabled(g, NVGPU_GR_USE_DMA_FOR_FW_BOOTSTRAP, true);
514 __nvgpu_set_enabled(g, NVGPU_SEC_SECUREGPCCS, false);
515 __nvgpu_set_enabled(g, NVGPU_PMU_PSTATE, false);
516
517#ifdef CONFIG_TEGRA_ACR
518 if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) {
519 __nvgpu_set_enabled(g, NVGPU_SEC_PRIVSECURITY, true);
520 } else {
521 val = gk20a_readl(g, fuse_opt_priv_sec_en_r());
522 if (!val) {
523 gk20a_dbg_info("priv security is disabled in HW");
524 __nvgpu_set_enabled(g, NVGPU_SEC_PRIVSECURITY, false);
525 } else {
526 __nvgpu_set_enabled(g, NVGPU_SEC_PRIVSECURITY, true);
527 }
528 }
529#else
530 if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) {
531 gk20a_dbg_info("running ASIM with PRIV security disabled");
532 __nvgpu_set_enabled(g, NVGPU_SEC_PRIVSECURITY, false);
533 } else {
534 val = gk20a_readl(g, fuse_opt_priv_sec_en_r());
535 if (!val) {
536 __nvgpu_set_enabled(g, NVGPU_SEC_PRIVSECURITY, false);
537 } else {
538 gk20a_dbg_info("priv security is not supported but enabled");
539 __nvgpu_set_enabled(g, NVGPU_SEC_PRIVSECURITY, true);
540 return -EPERM;
541 }
542 }
543#endif
544
545 /* priv security dependent ops */
546 if (nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY)) {
547 /* Add in ops from gm20b acr */
548 gops->pmu.is_pmu_supported = gm20b_is_pmu_supported;
549 gops->pmu.prepare_ucode = prepare_ucode_blob;
550 gops->pmu.pmu_setup_hw_and_bootstrap = gm20b_bootstrap_hs_flcn;
551 gops->pmu.is_lazy_bootstrap = gm20b_is_lazy_bootstrap;
552 gops->pmu.is_priv_load = gm20b_is_priv_load;
553 gops->pmu.get_wpr = gm20b_wpr_info;
554 gops->pmu.alloc_blob_space = gm20b_alloc_blob_space;
555 gops->pmu.pmu_populate_loader_cfg =
556 gm20b_pmu_populate_loader_cfg;
557 gops->pmu.flcn_populate_bl_dmem_desc =
558 gm20b_flcn_populate_bl_dmem_desc;
559 gops->pmu.falcon_wait_for_halt = pmu_wait_for_halt;
560 gops->pmu.falcon_clear_halt_interrupt_status =
561 clear_halt_interrupt_status;
562 gops->pmu.init_falcon_setup_hw = gm20b_init_pmu_setup_hw1;
563
564 gops->pmu.init_wpr_region = gm20b_pmu_init_acr;
565 gops->pmu.load_lsfalcon_ucode = gm20b_load_falcon_ucode;
566
567 gops->gr.load_ctxsw_ucode = gr_gm20b_load_ctxsw_ucode;
568 } else {
569 /* Inherit from gk20a */
570 gops->pmu.is_pmu_supported = gk20a_is_pmu_supported;
571 gops->pmu.prepare_ucode = nvgpu_pmu_prepare_ns_ucode_blob;
572 gops->pmu.pmu_setup_hw_and_bootstrap = gk20a_init_pmu_setup_hw1;
573 gops->pmu.pmu_nsbootstrap = pmu_bootstrap;
574
575 gops->pmu.load_lsfalcon_ucode = NULL;
576 gops->pmu.init_wpr_region = NULL;
577
578 gops->gr.load_ctxsw_ucode = gr_gk20a_load_ctxsw_ucode;
579 }
580
581 __nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, false);
582 g->pmu_lsf_pmu_wpr_init_done = 0;
583 g->bootstrap_owner = LSF_BOOTSTRAP_OWNER_DEFAULT;
584
585 g->name = "gm20b";
586
587 return 0;
588}
diff --git a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_fifo_gp10b.c b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_fifo_gp10b.c
deleted file mode 100644
index 4348db8e..00000000
--- a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_fifo_gp10b.c
+++ /dev/null
@@ -1,30 +0,0 @@
1/*
2 * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#include "vgpu_fifo_gp10b.h"
24
25void vgpu_gp10b_init_fifo_ops(struct gpu_ops *gops)
26{
27 /* syncpoint protection not supported yet */
28 gops->fifo.resetup_ramfc = NULL;
29 gops->fifo.reschedule_runlist = NULL;
30}
diff --git a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c
deleted file mode 100644
index 8a5130f6..00000000
--- a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c
+++ /dev/null
@@ -1,338 +0,0 @@
1/*
2 * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#include <nvgpu/kmem.h>
24#include <nvgpu/dma.h>
25#include <nvgpu/bug.h>
26
27#include "vgpu/vgpu.h"
28#include "vgpu/gm20b/vgpu_gr_gm20b.h"
29
30#include "vgpu_gr_gp10b.h"
31
32#include <nvgpu/hw/gp10b/hw_gr_gp10b.h>
33
34void vgpu_gr_gp10b_free_gr_ctx(struct gk20a *g, struct vm_gk20a *vm,
35 struct gr_ctx_desc *gr_ctx)
36{
37 struct tegra_vgpu_cmd_msg msg = {0};
38 struct tegra_vgpu_gr_ctx_params *p = &msg.params.gr_ctx;
39 int err;
40
41 gk20a_dbg_fn("");
42
43 if (!gr_ctx || !gr_ctx->mem.gpu_va)
44 return;
45
46 msg.cmd = TEGRA_VGPU_CMD_GR_CTX_FREE;
47 msg.handle = vgpu_get_handle(g);
48 p->gr_ctx_handle = gr_ctx->virt_ctx;
49 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
50 WARN_ON(err || msg.ret);
51
52 __nvgpu_vm_free_va(vm, gr_ctx->mem.gpu_va, gmmu_page_size_kernel);
53
54 nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.pagepool_ctxsw_buffer);
55 nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.betacb_ctxsw_buffer);
56 nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.spill_ctxsw_buffer);
57 nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.preempt_ctxsw_buffer);
58
59 nvgpu_kfree(g, gr_ctx);
60}
61
62int vgpu_gr_gp10b_alloc_gr_ctx(struct gk20a *g,
63 struct gr_ctx_desc **__gr_ctx,
64 struct vm_gk20a *vm,
65 u32 class,
66 u32 flags)
67{
68 struct gr_ctx_desc *gr_ctx;
69 u32 graphics_preempt_mode = 0;
70 u32 compute_preempt_mode = 0;
71 struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
72 int err;
73
74 gk20a_dbg_fn("");
75
76 err = vgpu_gr_alloc_gr_ctx(g, __gr_ctx, vm, class, flags);
77 if (err)
78 return err;
79
80 gr_ctx = *__gr_ctx;
81
82 if (flags & NVGPU_OBJ_CTX_FLAGS_SUPPORT_GFXP)
83 graphics_preempt_mode = NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP;
84 if (flags & NVGPU_OBJ_CTX_FLAGS_SUPPORT_CILP)
85 compute_preempt_mode = NVGPU_PREEMPTION_MODE_COMPUTE_CILP;
86
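	/*
	 * If the server mandates preemption (force_preempt_mode constant) and
	 * the caller did not request a mode, default to GFXP for graphics
	 * classes and CTA for compute classes.
	 */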
87 if (priv->constants.force_preempt_mode && !graphics_preempt_mode &&
88 !compute_preempt_mode) {
89 graphics_preempt_mode = g->ops.gr.is_valid_gfx_class(g, class) ?
90 NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP : 0;
91 compute_preempt_mode =
92 g->ops.gr.is_valid_compute_class(g, class) ?
93 NVGPU_PREEMPTION_MODE_COMPUTE_CTA : 0;
94 }
95
96 if (graphics_preempt_mode || compute_preempt_mode) {
97 if (g->ops.gr.set_ctxsw_preemption_mode) {
98 err = g->ops.gr.set_ctxsw_preemption_mode(g, gr_ctx, vm,
99 class, graphics_preempt_mode, compute_preempt_mode);
100 if (err) {
101 nvgpu_err(g,
102 "set_ctxsw_preemption_mode failed");
103 goto fail;
104 }
105 } else {
106 err = -ENOSYS;
107 goto fail;
108 }
109 }
110
111 gk20a_dbg_fn("done");
112 return err;
113
114fail:
115 vgpu_gr_gp10b_free_gr_ctx(g, vm, gr_ctx);
116 return err;
117}
118
119int vgpu_gr_gp10b_set_ctxsw_preemption_mode(struct gk20a *g,
120 struct gr_ctx_desc *gr_ctx,
121 struct vm_gk20a *vm, u32 class,
122 u32 graphics_preempt_mode,
123 u32 compute_preempt_mode)
124{
125 struct tegra_vgpu_cmd_msg msg = {};
126 struct tegra_vgpu_gr_bind_ctxsw_buffers_params *p =
127 &msg.params.gr_bind_ctxsw_buffers;
128 int err = 0;
129
130 if (g->ops.gr.is_valid_gfx_class(g, class) &&
131 g->gr.t18x.ctx_vars.force_preemption_gfxp)
132 graphics_preempt_mode = NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP;
133
134 if (g->ops.gr.is_valid_compute_class(g, class) &&
135 g->gr.t18x.ctx_vars.force_preemption_cilp)
136 compute_preempt_mode = NVGPU_PREEMPTION_MODE_COMPUTE_CILP;
137
138 /* check for invalid combinations */
139 if ((graphics_preempt_mode == 0) && (compute_preempt_mode == 0))
140 return -EINVAL;
141
142 if ((graphics_preempt_mode == NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP) &&
143 (compute_preempt_mode == NVGPU_PREEMPTION_MODE_COMPUTE_CILP))
144 return -EINVAL;
145
146 /* set preemption modes */
147 switch (graphics_preempt_mode) {
148 case NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP:
149 {
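		/*
		 * GFXP needs four ctxsw buffers (preempt, spill, pagepool and
		 * betacb) allocated in the channel VM; their GPU VAs and sizes
		 * are passed to the server via the bind-ctxsw-buffers command
		 * below.
		 */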
150 u32 spill_size =
151 gr_gpc0_swdx_rm_spill_buffer_size_256b_default_v() *
152 gr_gpc0_swdx_rm_spill_buffer_size_256b_byte_granularity_v();
153 u32 pagepool_size = g->ops.gr.pagepool_default_size(g) *
154 gr_scc_pagepool_total_pages_byte_granularity_v();
155 u32 betacb_size = g->gr.attrib_cb_default_size +
156 (gr_gpc0_ppc0_cbm_beta_cb_size_v_gfxp_v() -
157 gr_gpc0_ppc0_cbm_beta_cb_size_v_default_v());
158 u32 attrib_cb_size = (betacb_size + g->gr.alpha_cb_size) *
159 gr_gpc0_ppc0_cbm_beta_cb_size_v_granularity_v() *
160 g->gr.max_tpc_count;
161 struct nvgpu_mem *desc;
162
163 attrib_cb_size = ALIGN(attrib_cb_size, 128);
164
165 gk20a_dbg_info("gfxp context preempt size=%d",
166 g->gr.t18x.ctx_vars.preempt_image_size);
167 gk20a_dbg_info("gfxp context spill size=%d", spill_size);
168 gk20a_dbg_info("gfxp context pagepool size=%d", pagepool_size);
169 gk20a_dbg_info("gfxp context attrib cb size=%d",
170 attrib_cb_size);
171
172 err = gr_gp10b_alloc_buffer(vm,
173 g->gr.t18x.ctx_vars.preempt_image_size,
174 &gr_ctx->t18x.preempt_ctxsw_buffer);
175 if (err) {
176 err = -ENOMEM;
177 goto fail;
178 }
179 desc = &gr_ctx->t18x.preempt_ctxsw_buffer;
180 p->gpu_va[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_MAIN] = desc->gpu_va;
181 p->size[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_MAIN] = desc->size;
182
183 err = gr_gp10b_alloc_buffer(vm,
184 spill_size,
185 &gr_ctx->t18x.spill_ctxsw_buffer);
186 if (err) {
187 err = -ENOMEM;
188 goto fail;
189 }
190 desc = &gr_ctx->t18x.spill_ctxsw_buffer;
191 p->gpu_va[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_SPILL] = desc->gpu_va;
192 p->size[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_SPILL] = desc->size;
193
194 err = gr_gp10b_alloc_buffer(vm,
195 pagepool_size,
196 &gr_ctx->t18x.pagepool_ctxsw_buffer);
197 if (err) {
198 err = -ENOMEM;
199 goto fail;
200 }
201 desc = &gr_ctx->t18x.pagepool_ctxsw_buffer;
202 p->gpu_va[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_PAGEPOOL] =
203 desc->gpu_va;
204 p->size[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_PAGEPOOL] = desc->size;
205
206 err = gr_gp10b_alloc_buffer(vm,
207 attrib_cb_size,
208 &gr_ctx->t18x.betacb_ctxsw_buffer);
209 if (err) {
210 err = -ENOMEM;
211 goto fail;
212 }
213 desc = &gr_ctx->t18x.betacb_ctxsw_buffer;
214 p->gpu_va[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_BETACB] =
215 desc->gpu_va;
216 p->size[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_BETACB] = desc->size;
217
218 gr_ctx->graphics_preempt_mode = NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP;
219 p->mode = TEGRA_VGPU_GR_CTXSW_PREEMPTION_MODE_GFX_GFXP;
220 break;
221 }
222 case NVGPU_PREEMPTION_MODE_GRAPHICS_WFI:
223 gr_ctx->graphics_preempt_mode = graphics_preempt_mode;
224 break;
225
226 default:
227 break;
228 }
229
230 if (g->ops.gr.is_valid_compute_class(g, class)) {
231 switch (compute_preempt_mode) {
232 case NVGPU_PREEMPTION_MODE_COMPUTE_WFI:
233 gr_ctx->compute_preempt_mode =
234 NVGPU_PREEMPTION_MODE_COMPUTE_WFI;
235 p->mode = TEGRA_VGPU_GR_CTXSW_PREEMPTION_MODE_WFI;
236 break;
237 case NVGPU_PREEMPTION_MODE_COMPUTE_CTA:
238 gr_ctx->compute_preempt_mode =
239 NVGPU_PREEMPTION_MODE_COMPUTE_CTA;
240 p->mode =
241 TEGRA_VGPU_GR_CTXSW_PREEMPTION_MODE_COMPUTE_CTA;
242 break;
243 case NVGPU_PREEMPTION_MODE_COMPUTE_CILP:
244 gr_ctx->compute_preempt_mode =
245 NVGPU_PREEMPTION_MODE_COMPUTE_CILP;
246 p->mode =
247 TEGRA_VGPU_GR_CTXSW_PREEMPTION_MODE_COMPUTE_CILP;
248 break;
249 default:
250 break;
251 }
252 }
253
254 if (gr_ctx->graphics_preempt_mode || gr_ctx->compute_preempt_mode) {
255 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_BIND_GR_CTXSW_BUFFERS;
256 msg.handle = vgpu_get_handle(g);
257 p->gr_ctx_handle = gr_ctx->virt_ctx;
258 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
259 if (err || msg.ret) {
260 err = -ENOMEM;
261 goto fail;
262 }
263 }
264
265 return err;
266
267fail:
268 nvgpu_err(g, "%s failed %d", __func__, err);
269 return err;
270}
271
272int vgpu_gr_gp10b_set_preemption_mode(struct channel_gk20a *ch,
273 u32 graphics_preempt_mode,
274 u32 compute_preempt_mode)
275{
276 struct gr_ctx_desc *gr_ctx = ch->ch_ctx.gr_ctx;
277 struct gk20a *g = ch->g;
278 struct tsg_gk20a *tsg;
279 struct vm_gk20a *vm;
280 u32 class;
281 int err;
282
283 class = ch->obj_class;
284 if (!class)
285 return -EINVAL;
286
287 /* skip setting anything if both modes are already set */
288 if (graphics_preempt_mode &&
289 (graphics_preempt_mode == gr_ctx->graphics_preempt_mode))
290 graphics_preempt_mode = 0;
291
292 if (compute_preempt_mode &&
293 (compute_preempt_mode == gr_ctx->compute_preempt_mode))
294 compute_preempt_mode = 0;
295
296 if (graphics_preempt_mode == 0 && compute_preempt_mode == 0)
297 return 0;
298
299 if (gk20a_is_channel_marked_as_tsg(ch)) {
300 tsg = &g->fifo.tsg[ch->tsgid];
301 vm = tsg->vm;
302 } else {
303 vm = ch->vm;
304 }
305
306 if (g->ops.gr.set_ctxsw_preemption_mode) {
307 err = g->ops.gr.set_ctxsw_preemption_mode(g, gr_ctx, vm, class,
308 graphics_preempt_mode,
309 compute_preempt_mode);
310 if (err) {
311 nvgpu_err(g, "set_ctxsw_preemption_mode failed");
312 return err;
313 }
314 } else {
315 err = -ENOSYS;
316 }
317
318 return err;
319}
320
321int vgpu_gr_gp10b_init_ctx_state(struct gk20a *g)
322{
323 struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
324 int err;
325
326 gk20a_dbg_fn("");
327
328 err = vgpu_gr_init_ctx_state(g);
329 if (err)
330 return err;
331
332 g->gr.t18x.ctx_vars.preempt_image_size =
333 priv->constants.preempt_ctx_size;
334 if (!g->gr.t18x.ctx_vars.preempt_image_size)
335 return -EINVAL;
336
337 return 0;
338}
diff --git a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.h b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.h
deleted file mode 100644
index baf5a8e9..00000000
--- a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.h
+++ /dev/null
@@ -1,45 +0,0 @@
1/*
2 * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef __VGPU_GR_GP10B_H__
24#define __VGPU_GR_GP10B_H__
25
26#include "gk20a/gk20a.h"
27
28void vgpu_gr_gp10b_free_gr_ctx(struct gk20a *g, struct vm_gk20a *vm,
29 struct gr_ctx_desc *gr_ctx);
30int vgpu_gr_gp10b_alloc_gr_ctx(struct gk20a *g,
31 struct gr_ctx_desc **__gr_ctx,
32 struct vm_gk20a *vm,
33 u32 class,
34 u32 flags);
35int vgpu_gr_gp10b_set_ctxsw_preemption_mode(struct gk20a *g,
36 struct gr_ctx_desc *gr_ctx,
37 struct vm_gk20a *vm, u32 class,
38 u32 graphics_preempt_mode,
39 u32 compute_preempt_mode);
40int vgpu_gr_gp10b_set_preemption_mode(struct channel_gk20a *ch,
41 u32 graphics_preempt_mode,
42 u32 compute_preempt_mode);
43int vgpu_gr_gp10b_init_ctx_state(struct gk20a *g);
44
45#endif
diff --git a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_hal_gp10b.c b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_hal_gp10b.c
deleted file mode 100644
index 55448f3b..00000000
--- a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_hal_gp10b.c
+++ /dev/null
@@ -1,630 +0,0 @@
1/*
2 * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#include "vgpu/vgpu.h"
24#include "vgpu/fifo_vgpu.h"
25#include "vgpu/gr_vgpu.h"
26#include "vgpu/ltc_vgpu.h"
27#include "vgpu/mm_vgpu.h"
28#include "vgpu/dbg_vgpu.h"
29#include "vgpu/fecs_trace_vgpu.h"
30#include "vgpu/css_vgpu.h"
31#include "gp10b/gp10b.h"
32#include "gp10b/hal_gp10b.h"
33#include "vgpu/gm20b/vgpu_gr_gm20b.h"
34#include "vgpu_gr_gp10b.h"
35#include "vgpu_mm_gp10b.h"
36
37#include "gk20a/bus_gk20a.h"
38#include "gk20a/pramin_gk20a.h"
39#include "gk20a/flcn_gk20a.h"
40#include "gk20a/mc_gk20a.h"
41#include "gk20a/fb_gk20a.h"
42
43#include "gp10b/mc_gp10b.h"
44#include "gp10b/ltc_gp10b.h"
45#include "gp10b/mm_gp10b.h"
46#include "gp10b/ce_gp10b.h"
47#include "gp10b/fb_gp10b.h"
48#include "gp10b/pmu_gp10b.h"
49#include "gp10b/gr_ctx_gp10b.h"
50#include "gp10b/fifo_gp10b.h"
51#include "gp10b/gp10b_gating_reglist.h"
52#include "gp10b/regops_gp10b.h"
53#include "gp10b/therm_gp10b.h"
54#include "gp10b/priv_ring_gp10b.h"
55
56#include "gm20b/ltc_gm20b.h"
57#include "gm20b/gr_gm20b.h"
58#include "gm20b/fifo_gm20b.h"
59#include "gm20b/acr_gm20b.h"
60#include "gm20b/pmu_gm20b.h"
61#include "gm20b/fb_gm20b.h"
62#include "gm20b/mm_gm20b.h"
63
64#include <nvgpu/enabled.h>
65
66#include <nvgpu/hw/gp10b/hw_fuse_gp10b.h>
67#include <nvgpu/hw/gp10b/hw_fifo_gp10b.h>
68#include <nvgpu/hw/gp10b/hw_ram_gp10b.h>
69#include <nvgpu/hw/gp10b/hw_top_gp10b.h>
70#include <nvgpu/hw/gp10b/hw_pram_gp10b.h>
71#include <nvgpu/hw/gp10b/hw_pwr_gp10b.h>
72
73static const struct gpu_ops vgpu_gp10b_ops = {
74 .ltc = {
75 .determine_L2_size_bytes = vgpu_determine_L2_size_bytes,
76 .set_zbc_color_entry = gm20b_ltc_set_zbc_color_entry,
77 .set_zbc_depth_entry = gm20b_ltc_set_zbc_depth_entry,
78 .init_cbc = gm20b_ltc_init_cbc,
79 .init_fs_state = vgpu_ltc_init_fs_state,
80 .init_comptags = vgpu_ltc_init_comptags,
81 .cbc_ctrl = NULL,
82 .isr = gp10b_ltc_isr,
83 .cbc_fix_config = gm20b_ltc_cbc_fix_config,
84 .flush = gm20b_flush_ltc,
85 .set_enabled = gp10b_ltc_set_enabled,
86 },
87 .ce2 = {
88 .isr_stall = gp10b_ce_isr,
89 .isr_nonstall = gp10b_ce_nonstall_isr,
90 .get_num_pce = vgpu_ce_get_num_pce,
91 },
92 .gr = {
93 .get_patch_slots = gr_gk20a_get_patch_slots,
94 .init_gpc_mmu = gr_gm20b_init_gpc_mmu,
95 .bundle_cb_defaults = gr_gm20b_bundle_cb_defaults,
96 .cb_size_default = gr_gp10b_cb_size_default,
97 .calc_global_ctx_buffer_size =
98 gr_gp10b_calc_global_ctx_buffer_size,
99 .commit_global_attrib_cb = gr_gp10b_commit_global_attrib_cb,
100 .commit_global_bundle_cb = gr_gp10b_commit_global_bundle_cb,
101 .commit_global_cb_manager = gr_gp10b_commit_global_cb_manager,
102 .commit_global_pagepool = gr_gp10b_commit_global_pagepool,
103 .handle_sw_method = gr_gp10b_handle_sw_method,
104 .set_alpha_circular_buffer_size =
105 gr_gp10b_set_alpha_circular_buffer_size,
106 .set_circular_buffer_size = gr_gp10b_set_circular_buffer_size,
107 .enable_hww_exceptions = gr_gk20a_enable_hww_exceptions,
108 .is_valid_class = gr_gp10b_is_valid_class,
109 .is_valid_gfx_class = gr_gp10b_is_valid_gfx_class,
110 .is_valid_compute_class = gr_gp10b_is_valid_compute_class,
111 .get_sm_dsm_perf_regs = gr_gm20b_get_sm_dsm_perf_regs,
112 .get_sm_dsm_perf_ctrl_regs = gr_gm20b_get_sm_dsm_perf_ctrl_regs,
113 .init_fs_state = vgpu_gm20b_init_fs_state,
114 .set_hww_esr_report_mask = gr_gm20b_set_hww_esr_report_mask,
115 .falcon_load_ucode = gr_gm20b_load_ctxsw_ucode_segments,
116 .load_ctxsw_ucode = gr_gk20a_load_ctxsw_ucode,
117 .set_gpc_tpc_mask = gr_gp10b_set_gpc_tpc_mask,
118 .get_gpc_tpc_mask = vgpu_gr_get_gpc_tpc_mask,
119 .free_channel_ctx = vgpu_gr_free_channel_ctx,
120 .alloc_obj_ctx = vgpu_gr_alloc_obj_ctx,
121 .bind_ctxsw_zcull = vgpu_gr_bind_ctxsw_zcull,
122 .get_zcull_info = vgpu_gr_get_zcull_info,
123 .is_tpc_addr = gr_gm20b_is_tpc_addr,
124 .get_tpc_num = gr_gm20b_get_tpc_num,
125 .detect_sm_arch = vgpu_gr_detect_sm_arch,
126 .add_zbc_color = gr_gp10b_add_zbc_color,
127 .add_zbc_depth = gr_gp10b_add_zbc_depth,
128 .zbc_set_table = vgpu_gr_add_zbc,
129 .zbc_query_table = vgpu_gr_query_zbc,
130 .pmu_save_zbc = gk20a_pmu_save_zbc,
131 .add_zbc = gr_gk20a_add_zbc,
132 .pagepool_default_size = gr_gp10b_pagepool_default_size,
133 .init_ctx_state = vgpu_gr_gp10b_init_ctx_state,
134 .alloc_gr_ctx = vgpu_gr_gp10b_alloc_gr_ctx,
135 .free_gr_ctx = vgpu_gr_gp10b_free_gr_ctx,
136 .update_ctxsw_preemption_mode =
137 gr_gp10b_update_ctxsw_preemption_mode,
138 .dump_gr_regs = NULL,
139 .update_pc_sampling = gr_gm20b_update_pc_sampling,
140 .get_fbp_en_mask = vgpu_gr_get_fbp_en_mask,
141 .get_max_ltc_per_fbp = vgpu_gr_get_max_ltc_per_fbp,
142 .get_max_lts_per_ltc = vgpu_gr_get_max_lts_per_ltc,
143 .get_rop_l2_en_mask = vgpu_gr_rop_l2_en_mask,
144 .get_max_fbps_count = vgpu_gr_get_max_fbps_count,
145 .init_sm_dsm_reg_info = gr_gm20b_init_sm_dsm_reg_info,
146 .wait_empty = gr_gp10b_wait_empty,
147 .init_cyclestats = vgpu_gr_gm20b_init_cyclestats,
148 .set_sm_debug_mode = vgpu_gr_set_sm_debug_mode,
149 .enable_cde_in_fecs = gr_gm20b_enable_cde_in_fecs,
150 .bpt_reg_info = gr_gm20b_bpt_reg_info,
151 .get_access_map = gr_gp10b_get_access_map,
152 .handle_fecs_error = gr_gp10b_handle_fecs_error,
153 .handle_sm_exception = gr_gp10b_handle_sm_exception,
154 .handle_tex_exception = gr_gp10b_handle_tex_exception,
155 .enable_gpc_exceptions = gk20a_gr_enable_gpc_exceptions,
156 .enable_exceptions = gk20a_gr_enable_exceptions,
157 .get_lrf_tex_ltc_dram_override = get_ecc_override_val,
158 .update_smpc_ctxsw_mode = vgpu_gr_update_smpc_ctxsw_mode,
159 .update_hwpm_ctxsw_mode = vgpu_gr_update_hwpm_ctxsw_mode,
160 .record_sm_error_state = gm20b_gr_record_sm_error_state,
161 .update_sm_error_state = gm20b_gr_update_sm_error_state,
162 .clear_sm_error_state = vgpu_gr_clear_sm_error_state,
163 .suspend_contexts = vgpu_gr_suspend_contexts,
164 .resume_contexts = vgpu_gr_resume_contexts,
165 .get_preemption_mode_flags = gr_gp10b_get_preemption_mode_flags,
166 .init_sm_id_table = gr_gk20a_init_sm_id_table,
167 .load_smid_config = gr_gp10b_load_smid_config,
168 .program_sm_id_numbering = gr_gm20b_program_sm_id_numbering,
169 .is_ltcs_ltss_addr = gr_gm20b_is_ltcs_ltss_addr,
170 .is_ltcn_ltss_addr = gr_gm20b_is_ltcn_ltss_addr,
171 .split_lts_broadcast_addr = gr_gm20b_split_lts_broadcast_addr,
172 .split_ltc_broadcast_addr = gr_gm20b_split_ltc_broadcast_addr,
173 .setup_rop_mapping = gr_gk20a_setup_rop_mapping,
174 .program_zcull_mapping = gr_gk20a_program_zcull_mapping,
175 .commit_global_timeslice = gr_gk20a_commit_global_timeslice,
176 .commit_inst = vgpu_gr_commit_inst,
177 .write_zcull_ptr = gr_gk20a_write_zcull_ptr,
178 .write_pm_ptr = gr_gk20a_write_pm_ptr,
179 .init_elcg_mode = gr_gk20a_init_elcg_mode,
180 .load_tpc_mask = gr_gm20b_load_tpc_mask,
181 .inval_icache = gr_gk20a_inval_icache,
182 .trigger_suspend = gr_gk20a_trigger_suspend,
183 .wait_for_pause = gr_gk20a_wait_for_pause,
184 .resume_from_pause = gr_gk20a_resume_from_pause,
185 .clear_sm_errors = gr_gk20a_clear_sm_errors,
186 .tpc_enabled_exceptions = gr_gk20a_tpc_enabled_exceptions,
187 .get_esr_sm_sel = gk20a_gr_get_esr_sm_sel,
188 .sm_debugger_attached = gk20a_gr_sm_debugger_attached,
189 .suspend_single_sm = gk20a_gr_suspend_single_sm,
190 .suspend_all_sms = gk20a_gr_suspend_all_sms,
191 .resume_single_sm = gk20a_gr_resume_single_sm,
192 .resume_all_sms = gk20a_gr_resume_all_sms,
193 .get_sm_hww_warp_esr = gp10b_gr_get_sm_hww_warp_esr,
194 .get_sm_hww_global_esr = gk20a_gr_get_sm_hww_global_esr,
195 .get_sm_no_lock_down_hww_global_esr_mask =
196 gk20a_gr_get_sm_no_lock_down_hww_global_esr_mask,
197 .lock_down_sm = gk20a_gr_lock_down_sm,
198 .wait_for_sm_lock_down = gk20a_gr_wait_for_sm_lock_down,
199 .clear_sm_hww = gm20b_gr_clear_sm_hww,
200 .init_ovr_sm_dsm_perf = gk20a_gr_init_ovr_sm_dsm_perf,
201 .get_ovr_perf_regs = gk20a_gr_get_ovr_perf_regs,
202 .disable_rd_coalesce = gm20a_gr_disable_rd_coalesce,
203 .set_boosted_ctx = NULL,
204 .set_preemption_mode = vgpu_gr_gp10b_set_preemption_mode,
205 .set_czf_bypass = gr_gp10b_set_czf_bypass,
206 .init_czf_bypass = gr_gp10b_init_czf_bypass,
207 .pre_process_sm_exception = gr_gp10b_pre_process_sm_exception,
208 .set_preemption_buffer_va = gr_gp10b_set_preemption_buffer_va,
209 .init_preemption_state = gr_gp10b_init_preemption_state,
210 .update_boosted_ctx = NULL,
211 .set_bes_crop_debug3 = gr_gp10b_set_bes_crop_debug3,
212 .create_gr_sysfs = gr_gp10b_create_sysfs,
213 .set_ctxsw_preemption_mode =
214 vgpu_gr_gp10b_set_ctxsw_preemption_mode,
215 .init_ctxsw_hdr_data = gr_gp10b_init_ctxsw_hdr_data,
216 },
217 .fb = {
218 .reset = fb_gk20a_reset,
219 .init_hw = gk20a_fb_init_hw,
220 .init_fs_state = fb_gm20b_init_fs_state,
221 .set_mmu_page_size = gm20b_fb_set_mmu_page_size,
222 .set_use_full_comp_tag_line =
223 gm20b_fb_set_use_full_comp_tag_line,
224 .compression_page_size = gp10b_fb_compression_page_size,
225 .compressible_page_size = gp10b_fb_compressible_page_size,
226 .vpr_info_fetch = gm20b_fb_vpr_info_fetch,
227 .dump_vpr_wpr_info = gm20b_fb_dump_vpr_wpr_info,
228 .read_wpr_info = gm20b_fb_read_wpr_info,
229 .is_debug_mode_enabled = NULL,
230 .set_debug_mode = vgpu_mm_mmu_set_debug_mode,
231 .tlb_invalidate = vgpu_mm_tlb_invalidate,
232 },
233 .clock_gating = {
234 .slcg_bus_load_gating_prod =
235 gp10b_slcg_bus_load_gating_prod,
236 .slcg_ce2_load_gating_prod =
237 gp10b_slcg_ce2_load_gating_prod,
238 .slcg_chiplet_load_gating_prod =
239 gp10b_slcg_chiplet_load_gating_prod,
240 .slcg_ctxsw_firmware_load_gating_prod =
241 gp10b_slcg_ctxsw_firmware_load_gating_prod,
242 .slcg_fb_load_gating_prod =
243 gp10b_slcg_fb_load_gating_prod,
244 .slcg_fifo_load_gating_prod =
245 gp10b_slcg_fifo_load_gating_prod,
246 .slcg_gr_load_gating_prod =
247 gr_gp10b_slcg_gr_load_gating_prod,
248 .slcg_ltc_load_gating_prod =
249 ltc_gp10b_slcg_ltc_load_gating_prod,
250 .slcg_perf_load_gating_prod =
251 gp10b_slcg_perf_load_gating_prod,
252 .slcg_priring_load_gating_prod =
253 gp10b_slcg_priring_load_gating_prod,
254 .slcg_pmu_load_gating_prod =
255 gp10b_slcg_pmu_load_gating_prod,
256 .slcg_therm_load_gating_prod =
257 gp10b_slcg_therm_load_gating_prod,
258 .slcg_xbar_load_gating_prod =
259 gp10b_slcg_xbar_load_gating_prod,
260 .blcg_bus_load_gating_prod =
261 gp10b_blcg_bus_load_gating_prod,
262 .blcg_ce_load_gating_prod =
263 gp10b_blcg_ce_load_gating_prod,
264 .blcg_ctxsw_firmware_load_gating_prod =
265 gp10b_blcg_ctxsw_firmware_load_gating_prod,
266 .blcg_fb_load_gating_prod =
267 gp10b_blcg_fb_load_gating_prod,
268 .blcg_fifo_load_gating_prod =
269 gp10b_blcg_fifo_load_gating_prod,
270 .blcg_gr_load_gating_prod =
271 gp10b_blcg_gr_load_gating_prod,
272 .blcg_ltc_load_gating_prod =
273 gp10b_blcg_ltc_load_gating_prod,
274 .blcg_pwr_csb_load_gating_prod =
275 gp10b_blcg_pwr_csb_load_gating_prod,
276 .blcg_pmu_load_gating_prod =
277 gp10b_blcg_pmu_load_gating_prod,
278 .blcg_xbar_load_gating_prod =
279 gp10b_blcg_xbar_load_gating_prod,
280 .pg_gr_load_gating_prod =
281 gr_gp10b_pg_gr_load_gating_prod,
282 },
283 .fifo = {
284 .init_fifo_setup_hw = vgpu_init_fifo_setup_hw,
285 .bind_channel = vgpu_channel_bind,
286 .unbind_channel = vgpu_channel_unbind,
287 .disable_channel = vgpu_channel_disable,
288 .enable_channel = vgpu_channel_enable,
289 .alloc_inst = vgpu_channel_alloc_inst,
290 .free_inst = vgpu_channel_free_inst,
291 .setup_ramfc = vgpu_channel_setup_ramfc,
292 .channel_set_timeslice = vgpu_channel_set_timeslice,
293 .default_timeslice_us = vgpu_fifo_default_timeslice_us,
294 .setup_userd = gk20a_fifo_setup_userd,
295 .userd_gp_get = gk20a_fifo_userd_gp_get,
296 .userd_gp_put = gk20a_fifo_userd_gp_put,
297 .userd_pb_get = gk20a_fifo_userd_pb_get,
298 .pbdma_acquire_val = gk20a_fifo_pbdma_acquire_val,
299 .preempt_channel = vgpu_fifo_preempt_channel,
300 .preempt_tsg = vgpu_fifo_preempt_tsg,
301 .enable_tsg = vgpu_enable_tsg,
302 .disable_tsg = gk20a_disable_tsg,
303 .tsg_verify_channel_status = NULL,
304 .tsg_verify_status_ctx_reload = NULL,
305 .reschedule_runlist = NULL,
306 .update_runlist = vgpu_fifo_update_runlist,
307 .trigger_mmu_fault = gm20b_fifo_trigger_mmu_fault,
308 .get_mmu_fault_info = gp10b_fifo_get_mmu_fault_info,
309 .wait_engine_idle = vgpu_fifo_wait_engine_idle,
310 .get_num_fifos = gm20b_fifo_get_num_fifos,
311 .get_pbdma_signature = gp10b_fifo_get_pbdma_signature,
312 .set_runlist_interleave = vgpu_fifo_set_runlist_interleave,
313 .tsg_set_timeslice = vgpu_tsg_set_timeslice,
314 .tsg_open = vgpu_tsg_open,
315 .force_reset_ch = vgpu_fifo_force_reset_ch,
316 .engine_enum_from_type = gp10b_fifo_engine_enum_from_type,
317 .device_info_data_parse = gp10b_device_info_data_parse,
318 .eng_runlist_base_size = fifo_eng_runlist_base__size_1_v,
319 .init_engine_info = vgpu_fifo_init_engine_info,
320 .runlist_entry_size = ram_rl_entry_size_v,
321 .get_tsg_runlist_entry = gk20a_get_tsg_runlist_entry,
322 .get_ch_runlist_entry = gk20a_get_ch_runlist_entry,
323 .is_fault_engine_subid_gpc = gk20a_is_fault_engine_subid_gpc,
324 .dump_pbdma_status = gk20a_dump_pbdma_status,
325 .dump_eng_status = gk20a_dump_eng_status,
326 .dump_channel_status_ramfc = gk20a_dump_channel_status_ramfc,
327 .intr_0_error_mask = gk20a_fifo_intr_0_error_mask,
328 .is_preempt_pending = gk20a_fifo_is_preempt_pending,
329 .init_pbdma_intr_descs = gp10b_fifo_init_pbdma_intr_descs,
330 .reset_enable_hw = gk20a_init_fifo_reset_enable_hw,
331 .teardown_ch_tsg = gk20a_fifo_teardown_ch_tsg,
332 .handle_sched_error = gk20a_fifo_handle_sched_error,
333 .handle_pbdma_intr_0 = gk20a_fifo_handle_pbdma_intr_0,
334 .handle_pbdma_intr_1 = gk20a_fifo_handle_pbdma_intr_1,
335 .tsg_bind_channel = vgpu_tsg_bind_channel,
336 .tsg_unbind_channel = vgpu_tsg_unbind_channel,
337#ifdef CONFIG_TEGRA_GK20A_NVHOST
338 .alloc_syncpt_buf = gk20a_fifo_alloc_syncpt_buf,
339 .free_syncpt_buf = gk20a_fifo_free_syncpt_buf,
340 .add_syncpt_wait_cmd = gk20a_fifo_add_syncpt_wait_cmd,
341 .get_syncpt_wait_cmd_size = gk20a_fifo_get_syncpt_wait_cmd_size,
342 .add_syncpt_incr_cmd = gk20a_fifo_add_syncpt_incr_cmd,
343 .get_syncpt_incr_cmd_size = gk20a_fifo_get_syncpt_incr_cmd_size,
344#endif
345 .resetup_ramfc = NULL,
346 .device_info_fault_id = top_device_info_data_fault_id_enum_v,
347 },
348 .gr_ctx = {
349 .get_netlist_name = gr_gp10b_get_netlist_name,
350 .is_fw_defined = gr_gp10b_is_firmware_defined,
351 },
352#ifdef CONFIG_GK20A_CTXSW_TRACE
353 .fecs_trace = {
354 .alloc_user_buffer = vgpu_alloc_user_buffer,
355 .free_user_buffer = vgpu_free_user_buffer,
356 .mmap_user_buffer = vgpu_mmap_user_buffer,
357 .init = vgpu_fecs_trace_init,
358 .deinit = vgpu_fecs_trace_deinit,
359 .enable = vgpu_fecs_trace_enable,
360 .disable = vgpu_fecs_trace_disable,
361 .is_enabled = vgpu_fecs_trace_is_enabled,
362 .reset = NULL,
363 .flush = NULL,
364 .poll = vgpu_fecs_trace_poll,
365 .bind_channel = NULL,
366 .unbind_channel = NULL,
367 .max_entries = vgpu_fecs_trace_max_entries,
368 .set_filter = vgpu_fecs_trace_set_filter,
369 },
370#endif /* CONFIG_GK20A_CTXSW_TRACE */
371 .mm = {
372 /* FIXME: add support for sparse mappings */
373 .support_sparse = NULL,
374 .gmmu_map = vgpu_gp10b_locked_gmmu_map,
375 .gmmu_unmap = vgpu_locked_gmmu_unmap,
376 .vm_bind_channel = vgpu_vm_bind_channel,
377 .fb_flush = vgpu_mm_fb_flush,
378 .l2_invalidate = vgpu_mm_l2_invalidate,
379 .l2_flush = vgpu_mm_l2_flush,
380 .cbc_clean = gk20a_mm_cbc_clean,
381 .set_big_page_size = gm20b_mm_set_big_page_size,
382 .get_big_page_sizes = gm20b_mm_get_big_page_sizes,
383 .get_default_big_page_size = gp10b_mm_get_default_big_page_size,
384 .gpu_phys_addr = gm20b_gpu_phys_addr,
385 .get_iommu_bit = gk20a_mm_get_iommu_bit,
386 .get_mmu_levels = gp10b_mm_get_mmu_levels,
387 .init_pdb = gp10b_mm_init_pdb,
388 .init_mm_setup_hw = vgpu_gp10b_init_mm_setup_hw,
389 .is_bar1_supported = gm20b_mm_is_bar1_supported,
390 .init_inst_block = gk20a_init_inst_block,
391 .mmu_fault_pending = gk20a_fifo_mmu_fault_pending,
392 .init_bar2_vm = gb10b_init_bar2_vm,
393 .init_bar2_mm_hw_setup = gb10b_init_bar2_mm_hw_setup,
394 .remove_bar2_vm = gp10b_remove_bar2_vm,
395 .get_kind_invalid = gm20b_get_kind_invalid,
396 .get_kind_pitch = gm20b_get_kind_pitch,
397 },
398 .pramin = {
399 .enter = gk20a_pramin_enter,
400 .exit = gk20a_pramin_exit,
401 .data032_r = pram_data032_r,
402 },
403 .therm = {
404 .init_therm_setup_hw = gp10b_init_therm_setup_hw,
405 .elcg_init_idle_filters = gp10b_elcg_init_idle_filters,
406 },
407 .pmu = {
408 .pmu_setup_elpg = gp10b_pmu_setup_elpg,
409 .pmu_get_queue_head = pwr_pmu_queue_head_r,
410 .pmu_get_queue_head_size = pwr_pmu_queue_head__size_1_v,
411 .pmu_get_queue_tail = pwr_pmu_queue_tail_r,
412 .pmu_get_queue_tail_size = pwr_pmu_queue_tail__size_1_v,
413 .pmu_queue_head = gk20a_pmu_queue_head,
414 .pmu_queue_tail = gk20a_pmu_queue_tail,
415 .pmu_msgq_tail = gk20a_pmu_msgq_tail,
416 .pmu_mutex_size = pwr_pmu_mutex__size_1_v,
417 .pmu_mutex_acquire = gk20a_pmu_mutex_acquire,
418 .pmu_mutex_release = gk20a_pmu_mutex_release,
419 .write_dmatrfbase = gp10b_write_dmatrfbase,
420 .pmu_elpg_statistics = gp10b_pmu_elpg_statistics,
421 .pmu_pg_init_param = gp10b_pg_gr_init,
422 .pmu_pg_supported_engines_list = gk20a_pmu_pg_engines_list,
423 .pmu_pg_engines_feature_list = gk20a_pmu_pg_feature_list,
424 .dump_secure_fuses = pmu_dump_security_fuses_gp10b,
425 .reset_engine = gk20a_pmu_engine_reset,
426 .is_engine_in_reset = gk20a_pmu_is_engine_in_reset,
427 },
428 .regops = {
429 .get_global_whitelist_ranges =
430 gp10b_get_global_whitelist_ranges,
431 .get_global_whitelist_ranges_count =
432 gp10b_get_global_whitelist_ranges_count,
433 .get_context_whitelist_ranges =
434 gp10b_get_context_whitelist_ranges,
435 .get_context_whitelist_ranges_count =
436 gp10b_get_context_whitelist_ranges_count,
437 .get_runcontrol_whitelist = gp10b_get_runcontrol_whitelist,
438 .get_runcontrol_whitelist_count =
439 gp10b_get_runcontrol_whitelist_count,
440 .get_runcontrol_whitelist_ranges =
441 gp10b_get_runcontrol_whitelist_ranges,
442 .get_runcontrol_whitelist_ranges_count =
443 gp10b_get_runcontrol_whitelist_ranges_count,
444 .get_qctl_whitelist = gp10b_get_qctl_whitelist,
445 .get_qctl_whitelist_count = gp10b_get_qctl_whitelist_count,
446 .get_qctl_whitelist_ranges = gp10b_get_qctl_whitelist_ranges,
447 .get_qctl_whitelist_ranges_count =
448 gp10b_get_qctl_whitelist_ranges_count,
449 .apply_smpc_war = gp10b_apply_smpc_war,
450 },
451 .mc = {
452 .intr_enable = mc_gp10b_intr_enable,
453 .intr_unit_config = mc_gp10b_intr_unit_config,
454 .isr_stall = mc_gp10b_isr_stall,
455 .intr_stall = mc_gp10b_intr_stall,
456 .intr_stall_pause = mc_gp10b_intr_stall_pause,
457 .intr_stall_resume = mc_gp10b_intr_stall_resume,
458 .intr_nonstall = mc_gp10b_intr_nonstall,
459 .intr_nonstall_pause = mc_gp10b_intr_nonstall_pause,
460 .intr_nonstall_resume = mc_gp10b_intr_nonstall_resume,
461 .enable = gk20a_mc_enable,
462 .disable = gk20a_mc_disable,
463 .reset = gk20a_mc_reset,
464 .boot_0 = gk20a_mc_boot_0,
465 .is_intr1_pending = mc_gp10b_is_intr1_pending,
466 },
467 .debug = {
468 .show_dump = NULL,
469 },
470 .dbg_session_ops = {
471 .exec_reg_ops = vgpu_exec_regops,
472 .dbg_set_powergate = vgpu_dbg_set_powergate,
473 .check_and_set_global_reservation =
474 vgpu_check_and_set_global_reservation,
475 .check_and_set_context_reservation =
476 vgpu_check_and_set_context_reservation,
477 .release_profiler_reservation =
478 vgpu_release_profiler_reservation,
479 .perfbuffer_enable = vgpu_perfbuffer_enable,
480 .perfbuffer_disable = vgpu_perfbuffer_disable,
481 },
482 .bus = {
483 .init_hw = gk20a_bus_init_hw,
484 .isr = gk20a_bus_isr,
485 .read_ptimer = vgpu_read_ptimer,
486 .get_timestamps_zipper = vgpu_get_timestamps_zipper,
487 .bar1_bind = gk20a_bus_bar1_bind,
488 },
489#if defined(CONFIG_GK20A_CYCLE_STATS)
490 .css = {
491 .enable_snapshot = vgpu_css_enable_snapshot_buffer,
492 .disable_snapshot = vgpu_css_release_snapshot_buffer,
493 .check_data_available = vgpu_css_flush_snapshots,
494 .detach_snapshot = vgpu_css_detach,
495 .set_handled_snapshots = NULL,
496 .allocate_perfmon_ids = NULL,
497 .release_perfmon_ids = NULL,
498 },
499#endif
500 .falcon = {
501 .falcon_hal_sw_init = gk20a_falcon_hal_sw_init,
502 },
503 .priv_ring = {
504 .isr = gp10b_priv_ring_isr,
505 },
506 .chip_init_gpu_characteristics = vgpu_init_gpu_characteristics,
507 .get_litter_value = gp10b_get_litter_value,
508};
509
510int vgpu_gp10b_init_hal(struct gk20a *g)
511{
512 struct gpu_ops *gops = &g->ops;
513 u32 val;
514
515 gops->ltc = vgpu_gp10b_ops.ltc;
516 gops->ce2 = vgpu_gp10b_ops.ce2;
517 gops->gr = vgpu_gp10b_ops.gr;
518 gops->fb = vgpu_gp10b_ops.fb;
519 gops->clock_gating = vgpu_gp10b_ops.clock_gating;
520 gops->fifo = vgpu_gp10b_ops.fifo;
521 gops->gr_ctx = vgpu_gp10b_ops.gr_ctx;
522 gops->fecs_trace = vgpu_gp10b_ops.fecs_trace;
523 gops->mm = vgpu_gp10b_ops.mm;
524 gops->pramin = vgpu_gp10b_ops.pramin;
525 gops->therm = vgpu_gp10b_ops.therm;
526 gops->pmu = vgpu_gp10b_ops.pmu;
527 gops->regops = vgpu_gp10b_ops.regops;
528 gops->mc = vgpu_gp10b_ops.mc;
529 gops->debug = vgpu_gp10b_ops.debug;
530 gops->dbg_session_ops = vgpu_gp10b_ops.dbg_session_ops;
531 gops->bus = vgpu_gp10b_ops.bus;
532#if defined(CONFIG_GK20A_CYCLE_STATS)
533 gops->css = vgpu_gp10b_ops.css;
534#endif
535 gops->falcon = vgpu_gp10b_ops.falcon;
536
537 gops->priv_ring = vgpu_gp10b_ops.priv_ring;
538
539 /* Lone Functions */
540 gops->chip_init_gpu_characteristics =
541 vgpu_gp10b_ops.chip_init_gpu_characteristics;
542 gops->get_litter_value = vgpu_gp10b_ops.get_litter_value;
543
544 __nvgpu_set_enabled(g, NVGPU_GR_USE_DMA_FOR_FW_BOOTSTRAP, true);
545 __nvgpu_set_enabled(g, NVGPU_PMU_PSTATE, false);
546
547#ifdef CONFIG_TEGRA_ACR
548 if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) {
549 __nvgpu_set_enabled(g, NVGPU_SEC_PRIVSECURITY, false);
550 __nvgpu_set_enabled(g, NVGPU_SEC_SECUREGPCCS, false);
551 } else if (g->is_virtual) {
552 __nvgpu_set_enabled(g, NVGPU_SEC_PRIVSECURITY, true);
553 __nvgpu_set_enabled(g, NVGPU_SEC_SECUREGPCCS, true);
554 } else {
555 val = gk20a_readl(g, fuse_opt_priv_sec_en_r());
556 if (val) {
557 __nvgpu_set_enabled(g, NVGPU_SEC_PRIVSECURITY, true);
558 __nvgpu_set_enabled(g, NVGPU_SEC_SECUREGPCCS, true);
559 } else {
560 gk20a_dbg_info("priv security is disabled in HW");
561 __nvgpu_set_enabled(g, NVGPU_SEC_PRIVSECURITY, false);
562 __nvgpu_set_enabled(g, NVGPU_SEC_SECUREGPCCS, false);
563 }
564 }
565#else
566 if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) {
567 gk20a_dbg_info("running simulator with PRIV security disabled");
568 __nvgpu_set_enabled(g, NVGPU_SEC_PRIVSECURITY, false);
569 __nvgpu_set_enabled(g, NVGPU_SEC_SECUREGPCCS, false);
570 } else {
571 val = gk20a_readl(g, fuse_opt_priv_sec_en_r());
572 if (val) {
573 gk20a_dbg_info("priv security is not supported but enabled");
574 __nvgpu_set_enabled(g, NVGPU_SEC_PRIVSECURITY, true);
575 __nvgpu_set_enabled(g, NVGPU_SEC_SECUREGPCCS, true);
576 return -EPERM;
577 } else {
578 __nvgpu_set_enabled(g, NVGPU_SEC_PRIVSECURITY, false);
579 __nvgpu_set_enabled(g, NVGPU_SEC_SECUREGPCCS, false);
580 }
581 }
582#endif
583
584 /* priv security dependent ops */
585 if (nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY)) {
586 /* Add in ops from gm20b acr */
587 gops->pmu.is_pmu_supported = gm20b_is_pmu_supported,
588 gops->pmu.prepare_ucode = prepare_ucode_blob,
589 gops->pmu.pmu_setup_hw_and_bootstrap = gm20b_bootstrap_hs_flcn,
590 gops->pmu.is_lazy_bootstrap = gm20b_is_lazy_bootstrap,
591 gops->pmu.is_priv_load = gm20b_is_priv_load,
592 gops->pmu.get_wpr = gm20b_wpr_info,
593 gops->pmu.alloc_blob_space = gm20b_alloc_blob_space,
594 gops->pmu.pmu_populate_loader_cfg =
595 gm20b_pmu_populate_loader_cfg,
596 gops->pmu.flcn_populate_bl_dmem_desc =
597 gm20b_flcn_populate_bl_dmem_desc,
598 gops->pmu.falcon_wait_for_halt = pmu_wait_for_halt,
599 gops->pmu.falcon_clear_halt_interrupt_status =
600 clear_halt_interrupt_status,
601 gops->pmu.init_falcon_setup_hw = gm20b_init_pmu_setup_hw1,
602
603 gops->pmu.init_wpr_region = gm20b_pmu_init_acr;
604 gops->pmu.load_lsfalcon_ucode = gp10b_load_falcon_ucode;
605 gops->pmu.is_lazy_bootstrap = gp10b_is_lazy_bootstrap;
606 gops->pmu.is_priv_load = gp10b_is_priv_load;
607
608 gops->gr.load_ctxsw_ucode = gr_gm20b_load_ctxsw_ucode;
609 } else {
610 /* Inherit from gk20a */
611 gops->pmu.is_pmu_supported = gk20a_is_pmu_supported,
612 gops->pmu.prepare_ucode = nvgpu_pmu_prepare_ns_ucode_blob,
613 gops->pmu.pmu_setup_hw_and_bootstrap = gk20a_init_pmu_setup_hw1,
614 gops->pmu.pmu_nsbootstrap = pmu_bootstrap,
615
616 gops->pmu.load_lsfalcon_ucode = NULL;
617 gops->pmu.init_wpr_region = NULL;
618 gops->pmu.pmu_setup_hw_and_bootstrap = gp10b_init_pmu_setup_hw1;
619
620 gops->gr.load_ctxsw_ucode = gr_gk20a_load_ctxsw_ucode;
621 }
622
623 __nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, false);
624 g->pmu_lsf_pmu_wpr_init_done = 0;
625 g->bootstrap_owner = LSF_BOOTSTRAP_OWNER_DEFAULT;
626
627 g->name = "gp10b";
628
629 return 0;
630}
diff --git a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
deleted file mode 100644
index 5b48cca8..00000000
--- a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
+++ /dev/null
@@ -1,203 +0,0 @@
1/*
2 * Virtualized GPU Memory Management
3 *
4 * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include <uapi/linux/nvgpu.h>
26
27#include "vgpu/vgpu.h"
28#include "vgpu_mm_gp10b.h"
29#include "gk20a/mm_gk20a.h"
30
31#include <nvgpu/bug.h>
32
33int vgpu_gp10b_init_mm_setup_hw(struct gk20a *g)
34{
35 g->mm.bypass_smmu = true;
36 g->mm.disable_bigpage = true;
37 return 0;
38}
39
40static inline int add_mem_desc(struct tegra_vgpu_mem_desc *mem_desc,
41 u64 addr, u64 size, size_t *oob_size)
42{
43 if (*oob_size < sizeof(*mem_desc))
44 return -ENOMEM;
45
46 mem_desc->addr = addr;
47 mem_desc->length = size;
48 *oob_size -= sizeof(*mem_desc);
49 return 0;
50}
51
52u64 vgpu_gp10b_locked_gmmu_map(struct vm_gk20a *vm,
53 u64 map_offset,
54 struct nvgpu_sgt *sgt,
55 u64 buffer_offset,
56 u64 size,
57 int pgsz_idx,
58 u8 kind_v,
59 u32 ctag_offset,
60 u32 flags,
61 int rw_flag,
62 bool clear_ctags,
63 bool sparse,
64 bool priv,
65 struct vm_gk20a_mapping_batch *batch,
66 enum nvgpu_aperture aperture)
67{
68 int err = 0;
69 struct gk20a *g = gk20a_from_vm(vm);
70 struct tegra_vgpu_cmd_msg msg;
71 struct tegra_vgpu_as_map_ex_params *p = &msg.params.as_map_ex;
72 struct tegra_vgpu_mem_desc *mem_desc;
73 u32 page_size = vm->gmmu_page_sizes[pgsz_idx];
74 u64 buffer_size = PAGE_ALIGN(size);
75 u64 space_to_skip = buffer_offset;
76 u32 mem_desc_count = 0, i;
77 void *handle = NULL;
78 size_t oob_size;
79 u8 prot;
80 void *sgl;
81
82 gk20a_dbg_fn("");
83
84 /* FIXME: add support for sparse mappings */
85
86 if (WARN_ON(!sgt) || WARN_ON(!g->mm.bypass_smmu))
87 return 0;
88
89 if (space_to_skip & (page_size - 1))
90 return 0;
91
92 memset(&msg, 0, sizeof(msg));
93
94 /* Allocate (or validate when map_offset != 0) the virtual address. */
95 if (!map_offset) {
96 map_offset = __nvgpu_vm_alloc_va(vm, size, pgsz_idx);
97 if (!map_offset) {
98 nvgpu_err(g, "failed to allocate va space");
99 err = -ENOMEM;
100 goto fail;
101 }
102 }
103
104 handle = tegra_gr_comm_oob_get_ptr(TEGRA_GR_COMM_CTX_CLIENT,
105 tegra_gr_comm_get_server_vmid(),
106 TEGRA_VGPU_QUEUE_CMD,
107 (void **)&mem_desc, &oob_size);
108 if (!handle) {
109 err = -EINVAL;
110 goto fail;
111 }
112 sgl = sgt->sgl;
113 while (sgl) {
114 u64 phys_addr;
115 u64 chunk_length;
116
117 /*
 118		 * Skip over sgl entries that are fully consumed by space_to_skip.
119 */
120 if (space_to_skip &&
121 space_to_skip >= nvgpu_sgt_get_length(sgt, sgl)) {
122 space_to_skip -= nvgpu_sgt_get_length(sgt, sgl);
123 sgl = nvgpu_sgt_get_next(sgt, sgl);
124 continue;
125 }
126
127 phys_addr = nvgpu_sgt_get_phys(sgt, sgl) + space_to_skip;
128 chunk_length = min(size,
129 nvgpu_sgt_get_length(sgt, sgl) - space_to_skip);
130
131 if (add_mem_desc(&mem_desc[mem_desc_count++], phys_addr,
132 chunk_length, &oob_size)) {
133 err = -ENOMEM;
134 goto fail;
135 }
136
137 space_to_skip = 0;
138 size -= chunk_length;
139 sgl = nvgpu_sgt_get_next(sgt, sgl);
140
141 if (size == 0)
142 break;
143 }
144
145 if (rw_flag == gk20a_mem_flag_read_only)
146 prot = TEGRA_VGPU_MAP_PROT_READ_ONLY;
147 else if (rw_flag == gk20a_mem_flag_write_only)
148 prot = TEGRA_VGPU_MAP_PROT_WRITE_ONLY;
149 else
150 prot = TEGRA_VGPU_MAP_PROT_NONE;
151
152 if (pgsz_idx == gmmu_page_size_kernel) {
153 if (page_size == vm->gmmu_page_sizes[gmmu_page_size_small]) {
154 pgsz_idx = gmmu_page_size_small;
155 } else if (page_size ==
156 vm->gmmu_page_sizes[gmmu_page_size_big]) {
157 pgsz_idx = gmmu_page_size_big;
158 } else {
159 nvgpu_err(g, "invalid kernel page size %d",
160 page_size);
161 goto fail;
162 }
163 }
164
165 msg.cmd = TEGRA_VGPU_CMD_AS_MAP_EX;
166 msg.handle = vgpu_get_handle(g);
167 p->handle = vm->handle;
168 p->gpu_va = map_offset;
169 p->size = buffer_size;
170 p->mem_desc_count = mem_desc_count;
171 p->pgsz_idx = pgsz_idx;
172 p->iova = 0;
173 p->kind = kind_v;
174 p->cacheable = (flags & NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE) ? 1 : 0;
175 p->prot = prot;
176 p->ctag_offset = ctag_offset;
177 p->clear_ctags = clear_ctags;
178 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
179 if (err || msg.ret)
180 goto fail;
181
182 /* TLB invalidate handled on server side */
183
184 tegra_gr_comm_oob_put_ptr(handle);
185 return map_offset;
186fail:
187 if (handle)
188 tegra_gr_comm_oob_put_ptr(handle);
189 nvgpu_err(g, "Failed: err=%d, msg.ret=%d", err, msg.ret);
190 nvgpu_err(g,
191 " Map: %-5s GPU virt %#-12llx +%#-9llx "
192 "phys offset: %#-4llx; pgsz: %3dkb perm=%-2s | "
193 "kind=%#02x APT=%-6s",
194 vm->name, map_offset, buffer_size, buffer_offset,
195 vm->gmmu_page_sizes[pgsz_idx] >> 10,
196 nvgpu_gmmu_perm_str(rw_flag),
197 kind_v, "SYSMEM");
198 for (i = 0; i < mem_desc_count; i++)
199 nvgpu_err(g, " > 0x%010llx + 0x%llx",
200 mem_desc[i].addr, mem_desc[i].length);
201
202 return 0;
203}
diff --git a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.h b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.h
deleted file mode 100644
index fd6760ff..00000000
--- a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.h
+++ /dev/null
@@ -1,45 +0,0 @@
1/*
2 * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef __VGPU_MM_GP10B_H__
24#define __VGPU_MM_GP10B_H__
25
26#include "gk20a/gk20a.h"
27
28u64 vgpu_gp10b_locked_gmmu_map(struct vm_gk20a *vm,
29 u64 map_offset,
30 struct nvgpu_sgt *sgt,
31 u64 buffer_offset,
32 u64 size,
33 int pgsz_idx,
34 u8 kind_v,
35 u32 ctag_offset,
36 u32 flags,
37 int rw_flag,
38 bool clear_ctags,
39 bool sparse,
40 bool priv,
41 struct vm_gk20a_mapping_batch *batch,
42 enum nvgpu_aperture aperture);
43int vgpu_gp10b_init_mm_setup_hw(struct gk20a *g);
44
45#endif
diff --git a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
deleted file mode 100644
index 5dc6f68e..00000000
--- a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
+++ /dev/null
@@ -1,1220 +0,0 @@
1/*
2 * Virtualized GPU Graphics
3 *
4 * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include <uapi/linux/nvgpu.h>
26
27#include <nvgpu/kmem.h>
28#include <nvgpu/bug.h>
29
30#include "vgpu/vgpu.h"
31#include "vgpu/gr_vgpu.h"
32#include "gk20a/dbg_gpu_gk20a.h"
33
34#include <nvgpu/hw/gk20a/hw_gr_gk20a.h>
35
36void vgpu_gr_detect_sm_arch(struct gk20a *g)
37{
38 struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
39
40 gk20a_dbg_fn("");
41
42 g->params.sm_arch_sm_version =
43 priv->constants.sm_arch_sm_version;
44 g->params.sm_arch_spa_version =
45 priv->constants.sm_arch_spa_version;
46 g->params.sm_arch_warp_count =
47 priv->constants.sm_arch_warp_count;
48}
49
50int vgpu_gr_commit_inst(struct channel_gk20a *c, u64 gpu_va)
51{
52 struct tegra_vgpu_cmd_msg msg;
53 struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx;
54 int err;
55
56 gk20a_dbg_fn("");
57
58 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_COMMIT_GR_CTX;
59 msg.handle = vgpu_get_handle(c->g);
60 p->handle = c->virt_ctx;
61 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
62
63 return (err || msg.ret) ? -1 : 0;
64}
65
66static int vgpu_gr_commit_global_ctx_buffers(struct gk20a *g,
67 struct channel_gk20a *c, bool patch)
68{
69 struct tegra_vgpu_cmd_msg msg;
70 struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx;
71 int err;
72
73 gk20a_dbg_fn("");
74
75 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_COMMIT_GR_GLOBAL_CTX;
76 msg.handle = vgpu_get_handle(g);
77 p->handle = c->virt_ctx;
78 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
79
80 return (err || msg.ret) ? -1 : 0;
81}
82
 83/* load a saved fresh copy of the golden image into the channel gr_ctx */
84static int vgpu_gr_load_golden_ctx_image(struct gk20a *g,
85 struct channel_gk20a *c)
86{
87 struct tegra_vgpu_cmd_msg msg;
88 struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx;
89 int err;
90
91 gk20a_dbg_fn("");
92
93 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_LOAD_GR_GOLDEN_CTX;
94 msg.handle = vgpu_get_handle(g);
95 p->handle = c->virt_ctx;
96 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
97
98 return (err || msg.ret) ? -1 : 0;
99}
100
101int vgpu_gr_init_ctx_state(struct gk20a *g)
102{
103 struct gr_gk20a *gr = &g->gr;
104 struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
105
106 gk20a_dbg_fn("");
107
108 g->gr.ctx_vars.golden_image_size = priv->constants.golden_ctx_size;
109 g->gr.ctx_vars.zcull_ctxsw_image_size = priv->constants.zcull_ctx_size;
110 g->gr.ctx_vars.pm_ctxsw_image_size = priv->constants.hwpm_ctx_size;
111 if (!g->gr.ctx_vars.golden_image_size ||
112 !g->gr.ctx_vars.zcull_ctxsw_image_size ||
113 !g->gr.ctx_vars.pm_ctxsw_image_size)
114 return -ENXIO;
115
116 gr->ctx_vars.buffer_size = g->gr.ctx_vars.golden_image_size;
117 g->gr.ctx_vars.priv_access_map_size = 512 * 1024;
118 return 0;
119}
120
121static int vgpu_gr_alloc_global_ctx_buffers(struct gk20a *g)
122{
123 struct gr_gk20a *gr = &g->gr;
124 int attr_buffer_size;
125
126 u32 cb_buffer_size = gr->bundle_cb_default_size *
127 gr_scc_bundle_cb_size_div_256b_byte_granularity_v();
128
129 u32 pagepool_buffer_size = g->ops.gr.pagepool_default_size(g) *
130 gr_scc_pagepool_total_pages_byte_granularity_v();
131
132 gk20a_dbg_fn("");
133
134 attr_buffer_size = g->ops.gr.calc_global_ctx_buffer_size(g);
135
136 gk20a_dbg_info("cb_buffer_size : %d", cb_buffer_size);
137 gr->global_ctx_buffer[CIRCULAR].mem.size = cb_buffer_size;
138
139 gk20a_dbg_info("pagepool_buffer_size : %d", pagepool_buffer_size);
140 gr->global_ctx_buffer[PAGEPOOL].mem.size = pagepool_buffer_size;
141
142 gk20a_dbg_info("attr_buffer_size : %d", attr_buffer_size);
143 gr->global_ctx_buffer[ATTRIBUTE].mem.size = attr_buffer_size;
144
145 gk20a_dbg_info("priv access map size : %d",
146 gr->ctx_vars.priv_access_map_size);
147 gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem.size =
148 gr->ctx_vars.priv_access_map_size;
149
150 return 0;
151}
152
153static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
154 struct channel_gk20a *c)
155{
156 struct tegra_vgpu_cmd_msg msg;
157 struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx;
158 struct vm_gk20a *ch_vm = c->vm;
159 u64 *g_bfr_va = c->ch_ctx.global_ctx_buffer_va;
160 u64 *g_bfr_size = c->ch_ctx.global_ctx_buffer_size;
161 struct gr_gk20a *gr = &g->gr;
162 u64 gpu_va;
163 u32 i;
164 int err;
165
166 gk20a_dbg_fn("");
167
168 /* FIXME: add VPR support */
169
170 /* Circular Buffer */
171 gpu_va = __nvgpu_vm_alloc_va(ch_vm,
172 gr->global_ctx_buffer[CIRCULAR].mem.size,
173 gmmu_page_size_kernel);
174
175 if (!gpu_va)
176 goto clean_up;
177 g_bfr_va[CIRCULAR_VA] = gpu_va;
178 g_bfr_size[CIRCULAR_VA] = gr->global_ctx_buffer[CIRCULAR].mem.size;
179
180 /* Attribute Buffer */
181 gpu_va = __nvgpu_vm_alloc_va(ch_vm,
182 gr->global_ctx_buffer[ATTRIBUTE].mem.size,
183 gmmu_page_size_kernel);
184
185 if (!gpu_va)
186 goto clean_up;
187 g_bfr_va[ATTRIBUTE_VA] = gpu_va;
188 g_bfr_size[ATTRIBUTE_VA] = gr->global_ctx_buffer[ATTRIBUTE].mem.size;
189
190 /* Page Pool */
191 gpu_va = __nvgpu_vm_alloc_va(ch_vm,
192 gr->global_ctx_buffer[PAGEPOOL].mem.size,
193 gmmu_page_size_kernel);
194 if (!gpu_va)
195 goto clean_up;
196 g_bfr_va[PAGEPOOL_VA] = gpu_va;
197 g_bfr_size[PAGEPOOL_VA] = gr->global_ctx_buffer[PAGEPOOL].mem.size;
198
199 /* Priv register Access Map */
200 gpu_va = __nvgpu_vm_alloc_va(ch_vm,
201 gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem.size,
202 gmmu_page_size_kernel);
203 if (!gpu_va)
204 goto clean_up;
205 g_bfr_va[PRIV_ACCESS_MAP_VA] = gpu_va;
206 g_bfr_size[PRIV_ACCESS_MAP_VA] =
207 gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem.size;
208
209 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_MAP_GR_GLOBAL_CTX;
210 msg.handle = vgpu_get_handle(g);
211 p->handle = c->virt_ctx;
212 p->cb_va = g_bfr_va[CIRCULAR_VA];
213 p->attr_va = g_bfr_va[ATTRIBUTE_VA];
214 p->page_pool_va = g_bfr_va[PAGEPOOL_VA];
215 p->priv_access_map_va = g_bfr_va[PRIV_ACCESS_MAP_VA];
216 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
217 if (err || msg.ret)
218 goto clean_up;
219
220 c->ch_ctx.global_ctx_buffer_mapped = true;
221 return 0;
222
223 clean_up:
224 for (i = 0; i < NR_GLOBAL_CTX_BUF_VA; i++) {
225 if (g_bfr_va[i]) {
226 __nvgpu_vm_free_va(ch_vm, g_bfr_va[i],
227 gmmu_page_size_kernel);
228 g_bfr_va[i] = 0;
229 }
230 }
231 return -ENOMEM;
232}
233
234static void vgpu_gr_unmap_global_ctx_buffers(struct channel_gk20a *c)
235{
236 struct vm_gk20a *ch_vm = c->vm;
237 u64 *g_bfr_va = c->ch_ctx.global_ctx_buffer_va;
238 u64 *g_bfr_size = c->ch_ctx.global_ctx_buffer_size;
239 u32 i;
240
241 gk20a_dbg_fn("");
242
243 if (c->ch_ctx.global_ctx_buffer_mapped) {
244 struct tegra_vgpu_cmd_msg msg;
245 struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx;
246 int err;
247
248 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_UNMAP_GR_GLOBAL_CTX;
249 msg.handle = vgpu_get_handle(c->g);
250 p->handle = c->virt_ctx;
251 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
252 WARN_ON(err || msg.ret);
253 }
254
255 for (i = 0; i < NR_GLOBAL_CTX_BUF_VA; i++) {
256 if (g_bfr_va[i]) {
257 __nvgpu_vm_free_va(ch_vm, g_bfr_va[i],
258 gmmu_page_size_kernel);
259 g_bfr_va[i] = 0;
260 g_bfr_size[i] = 0;
261 }
262 }
263 c->ch_ctx.global_ctx_buffer_mapped = false;
264}
265
266int vgpu_gr_alloc_gr_ctx(struct gk20a *g,
267 struct gr_ctx_desc **__gr_ctx,
268 struct vm_gk20a *vm,
269 u32 class,
270 u32 flags)
271{
272 struct tegra_vgpu_cmd_msg msg = {0};
273 struct tegra_vgpu_gr_ctx_params *p = &msg.params.gr_ctx;
274 struct gr_gk20a *gr = &g->gr;
275 struct gr_ctx_desc *gr_ctx;
276 int err;
277
278 gk20a_dbg_fn("");
279
280 if (gr->ctx_vars.buffer_size == 0)
281 return 0;
282
283 /* alloc channel gr ctx buffer */
284 gr->ctx_vars.buffer_size = gr->ctx_vars.golden_image_size;
285 gr->ctx_vars.buffer_total_size = gr->ctx_vars.golden_image_size;
286
287 gr_ctx = nvgpu_kzalloc(g, sizeof(*gr_ctx));
288 if (!gr_ctx)
289 return -ENOMEM;
290
291 gr_ctx->mem.size = gr->ctx_vars.buffer_total_size;
292 gr_ctx->mem.gpu_va = __nvgpu_vm_alloc_va(vm,
293 gr_ctx->mem.size,
294 gmmu_page_size_kernel);
295
296 if (!gr_ctx->mem.gpu_va) {
297 nvgpu_kfree(g, gr_ctx);
298 return -ENOMEM;
299 }
300
301 msg.cmd = TEGRA_VGPU_CMD_GR_CTX_ALLOC;
302 msg.handle = vgpu_get_handle(g);
303 p->as_handle = vm->handle;
304 p->gr_ctx_va = gr_ctx->mem.gpu_va;
305 p->class_num = class;
306 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
307 err = err ? err : msg.ret;
308
309 if (unlikely(err)) {
310 nvgpu_err(g, "fail to alloc gr_ctx");
311 __nvgpu_vm_free_va(vm, gr_ctx->mem.gpu_va,
312 gmmu_page_size_kernel);
313 nvgpu_kfree(g, gr_ctx);
314 } else {
315 gr_ctx->virt_ctx = p->gr_ctx_handle;
316 *__gr_ctx = gr_ctx;
317 }
318
319 return err;
320}
321
322void vgpu_gr_free_gr_ctx(struct gk20a *g, struct vm_gk20a *vm,
323 struct gr_ctx_desc *gr_ctx)
324{
325 gk20a_dbg_fn("");
326
327 if (gr_ctx && gr_ctx->mem.gpu_va) {
328 struct tegra_vgpu_cmd_msg msg;
329 struct tegra_vgpu_gr_ctx_params *p = &msg.params.gr_ctx;
330 int err;
331
332 msg.cmd = TEGRA_VGPU_CMD_GR_CTX_FREE;
333 msg.handle = vgpu_get_handle(g);
334 p->gr_ctx_handle = gr_ctx->virt_ctx;
335 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
336 WARN_ON(err || msg.ret);
337
338 __nvgpu_vm_free_va(vm, gr_ctx->mem.gpu_va,
339 gmmu_page_size_kernel);
340 nvgpu_kfree(g, gr_ctx);
341 }
342}
343
344static void vgpu_gr_free_channel_gr_ctx(struct channel_gk20a *c)
345{
346 gk20a_dbg_fn("");
347
348 c->g->ops.gr.free_gr_ctx(c->g, c->vm, c->ch_ctx.gr_ctx);
349 c->ch_ctx.gr_ctx = NULL;
350}
351
352static int vgpu_gr_alloc_channel_patch_ctx(struct gk20a *g,
353 struct channel_gk20a *c)
354{
355 struct patch_desc *patch_ctx = &c->ch_ctx.patch_ctx;
356 struct vm_gk20a *ch_vm = c->vm;
357 struct tegra_vgpu_cmd_msg msg;
358 struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx;
359 int err;
360
361 gk20a_dbg_fn("");
362
363 patch_ctx->mem.size = 128 * sizeof(u32);
364 patch_ctx->mem.gpu_va = __nvgpu_vm_alloc_va(ch_vm,
365 patch_ctx->mem.size,
366 gmmu_page_size_kernel);
367 if (!patch_ctx->mem.gpu_va)
368 return -ENOMEM;
369
370 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_ALLOC_GR_PATCH_CTX;
371 msg.handle = vgpu_get_handle(g);
372 p->handle = c->virt_ctx;
373 p->patch_ctx_va = patch_ctx->mem.gpu_va;
374 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
375 if (err || msg.ret) {
376 __nvgpu_vm_free_va(ch_vm, patch_ctx->mem.gpu_va,
377 gmmu_page_size_kernel);
378 err = -ENOMEM;
379 }
380
381 return err;
382}
383
384static void vgpu_gr_free_channel_patch_ctx(struct channel_gk20a *c)
385{
386 struct patch_desc *patch_ctx = &c->ch_ctx.patch_ctx;
387 struct vm_gk20a *ch_vm = c->vm;
388
389 gk20a_dbg_fn("");
390
391 if (patch_ctx->mem.gpu_va) {
392 struct tegra_vgpu_cmd_msg msg;
393 struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx;
394 int err;
395
396 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_FREE_GR_PATCH_CTX;
397 msg.handle = vgpu_get_handle(c->g);
398 p->handle = c->virt_ctx;
399 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
400 WARN_ON(err || msg.ret);
401
402 __nvgpu_vm_free_va(ch_vm, patch_ctx->mem.gpu_va,
403 gmmu_page_size_kernel);
404 patch_ctx->mem.gpu_va = 0;
405 }
406}
407
408static void vgpu_gr_free_channel_pm_ctx(struct channel_gk20a *c)
409{
410 struct tegra_vgpu_cmd_msg msg;
411 struct tegra_vgpu_channel_free_hwpm_ctx *p = &msg.params.free_hwpm_ctx;
412 struct channel_ctx_gk20a *ch_ctx = &c->ch_ctx;
413 struct pm_ctx_desc *pm_ctx = &ch_ctx->pm_ctx;
414 int err;
415
416 gk20a_dbg_fn("");
417
418 /* check if hwpm was ever initialized. If not, nothing to do */
419 if (pm_ctx->mem.gpu_va == 0)
420 return;
421
422 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_FREE_HWPM_CTX;
423 msg.handle = vgpu_get_handle(c->g);
424 p->handle = c->virt_ctx;
425 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
426 WARN_ON(err || msg.ret);
427
428 __nvgpu_vm_free_va(c->vm, pm_ctx->mem.gpu_va,
429 gmmu_page_size_kernel);
430 pm_ctx->mem.gpu_va = 0;
431}
432
433void vgpu_gr_free_channel_ctx(struct channel_gk20a *c, bool is_tsg)
434{
435 gk20a_dbg_fn("");
436
437 if (c->g->ops.fifo.free_channel_ctx_header)
438 c->g->ops.fifo.free_channel_ctx_header(c);
439 vgpu_gr_unmap_global_ctx_buffers(c);
440 vgpu_gr_free_channel_patch_ctx(c);
441 vgpu_gr_free_channel_pm_ctx(c);
442 if (!is_tsg)
443 vgpu_gr_free_channel_gr_ctx(c);
444
445 /* zcull_ctx, pm_ctx */
446
447 memset(&c->ch_ctx, 0, sizeof(struct channel_ctx_gk20a));
448
449 c->first_init = false;
450}
451
452static int vgpu_gr_ch_bind_gr_ctx(struct channel_gk20a *c)
453{
454 struct gr_ctx_desc *gr_ctx = c->ch_ctx.gr_ctx;
455 struct tegra_vgpu_cmd_msg msg = {0};
456 struct tegra_vgpu_channel_bind_gr_ctx_params *p =
457 &msg.params.ch_bind_gr_ctx;
458 int err;
459
460 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_BIND_GR_CTX;
461 msg.handle = vgpu_get_handle(c->g);
462 p->ch_handle = c->virt_ctx;
463 p->gr_ctx_handle = gr_ctx->virt_ctx;
464 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
465 err = err ? err : msg.ret;
466 WARN_ON(err);
467
468 return err;
469}
470
471static int vgpu_gr_tsg_bind_gr_ctx(struct tsg_gk20a *tsg)
472{
473 struct gr_ctx_desc *gr_ctx = tsg->tsg_gr_ctx;
474 struct tegra_vgpu_cmd_msg msg = {0};
475 struct tegra_vgpu_tsg_bind_gr_ctx_params *p =
476 &msg.params.tsg_bind_gr_ctx;
477 int err;
478
479 msg.cmd = TEGRA_VGPU_CMD_TSG_BIND_GR_CTX;
480 msg.handle = vgpu_get_handle(tsg->g);
481 p->tsg_id = tsg->tsgid;
482 p->gr_ctx_handle = gr_ctx->virt_ctx;
483 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
484 err = err ? err : msg.ret;
485 WARN_ON(err);
486
487 return err;
488}
489
490int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags)
491{
492 struct gk20a *g = c->g;
493 struct fifo_gk20a *f = &g->fifo;
494 struct channel_ctx_gk20a *ch_ctx = &c->ch_ctx;
495 struct tsg_gk20a *tsg = NULL;
496 int err = 0;
497
498 gk20a_dbg_fn("");
499
 500	/* an address space needs to have been bound at this point. */
501 if (!gk20a_channel_as_bound(c)) {
502 nvgpu_err(g, "not bound to address space at time"
503 " of grctx allocation");
504 return -EINVAL;
505 }
506
507 if (!g->ops.gr.is_valid_class(g, class_num)) {
508 nvgpu_err(g, "invalid obj class 0x%x", class_num);
509 err = -EINVAL;
510 goto out;
511 }
512 c->obj_class = class_num;
513
514 if (gk20a_is_channel_marked_as_tsg(c))
515 tsg = &f->tsg[c->tsgid];
516
517 if (!tsg) {
518 /* allocate gr ctx buffer */
519 if (!ch_ctx->gr_ctx) {
520 err = g->ops.gr.alloc_gr_ctx(g, &c->ch_ctx.gr_ctx,
521 c->vm,
522 class_num,
523 flags);
524 if (!err)
525 err = vgpu_gr_ch_bind_gr_ctx(c);
526 if (err) {
527 nvgpu_err(g, "fail to allocate gr ctx buffer");
528 goto out;
529 }
530 } else {
 531			/* TBD: needs to be more subtle about which is
 532			 * being allocated, as some are allowed to be
 533			 * allocated along the same channel */
534 nvgpu_err(g,
535 "too many classes alloc'd on same channel");
536 err = -EINVAL;
537 goto out;
538 }
539 } else {
540 if (!tsg->tsg_gr_ctx) {
541 tsg->vm = c->vm;
542 nvgpu_vm_get(tsg->vm);
543 err = g->ops.gr.alloc_gr_ctx(g, &tsg->tsg_gr_ctx,
544 c->vm,
545 class_num,
546 flags);
547 if (!err)
548 err = vgpu_gr_tsg_bind_gr_ctx(tsg);
549 if (err) {
550 nvgpu_err(g,
551 "fail to allocate TSG gr ctx buffer, err=%d", err);
552 nvgpu_vm_put(tsg->vm);
553 tsg->vm = NULL;
554 goto out;
555 }
556 }
557
558 ch_ctx->gr_ctx = tsg->tsg_gr_ctx;
559 err = vgpu_gr_ch_bind_gr_ctx(c);
560 if (err) {
561 nvgpu_err(g, "fail to bind gr ctx buffer");
562 goto out;
563 }
564 }
565
566 /* commit gr ctx buffer */
567 err = g->ops.gr.commit_inst(c, ch_ctx->gr_ctx->mem.gpu_va);
568 if (err) {
569 nvgpu_err(g, "fail to commit gr ctx buffer");
570 goto out;
571 }
572
573 /* allocate patch buffer */
574 if (ch_ctx->patch_ctx.mem.priv.pages == NULL) {
575 err = vgpu_gr_alloc_channel_patch_ctx(g, c);
576 if (err) {
577 nvgpu_err(g, "fail to allocate patch buffer");
578 goto out;
579 }
580 }
581
582 /* map global buffer to channel gpu_va and commit */
583 if (!ch_ctx->global_ctx_buffer_mapped) {
584 err = vgpu_gr_map_global_ctx_buffers(g, c);
585 if (err) {
586 nvgpu_err(g, "fail to map global ctx buffer");
587 goto out;
588 }
589 gr_gk20a_elpg_protected_call(g,
590 vgpu_gr_commit_global_ctx_buffers(g, c, true));
591 }
592
593 /* load golden image */
594 if (!c->first_init) {
595 err = gr_gk20a_elpg_protected_call(g,
596 vgpu_gr_load_golden_ctx_image(g, c));
597 if (err) {
598 nvgpu_err(g, "fail to load golden ctx image");
599 goto out;
600 }
601 c->first_init = true;
602 }
603
604 gk20a_dbg_fn("done");
605 return 0;
606out:
 607	/* 1. gr_ctx, patch_ctx and global ctx buffer mappings
 608	   can be reused, so there is no need to release them.
 609	   2. golden image load is a one-time thing, so if
 610	   it passes, there is no need to undo it. */
611 nvgpu_err(g, "fail");
612 return err;
613}
614
615static int vgpu_gr_init_gr_config(struct gk20a *g, struct gr_gk20a *gr)
616{
617 struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
618 u32 gpc_index;
619 int err = -ENOMEM;
620
621 gk20a_dbg_fn("");
622
623 gr->max_gpc_count = priv->constants.max_gpc_count;
624 gr->gpc_count = priv->constants.gpc_count;
625 gr->max_tpc_per_gpc_count = priv->constants.max_tpc_per_gpc_count;
626
627 gr->max_tpc_count = gr->max_gpc_count * gr->max_tpc_per_gpc_count;
628
629 gr->gpc_tpc_count = nvgpu_kzalloc(g, gr->gpc_count * sizeof(u32));
630 if (!gr->gpc_tpc_count)
631 goto cleanup;
632
633 gr->gpc_tpc_mask = nvgpu_kzalloc(g, gr->gpc_count * sizeof(u32));
634 if (!gr->gpc_tpc_mask)
635 goto cleanup;
636
637 gr->sm_to_cluster = nvgpu_kzalloc(g, gr->gpc_count *
638 gr->max_tpc_per_gpc_count *
639 sizeof(struct sm_info));
640 if (!gr->sm_to_cluster)
641 goto cleanup;
642
643 gr->tpc_count = 0;
644 for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) {
645 gr->gpc_tpc_count[gpc_index] =
646 priv->constants.gpc_tpc_count[gpc_index];
647
648 gr->tpc_count += gr->gpc_tpc_count[gpc_index];
649
650 if (g->ops.gr.get_gpc_tpc_mask)
651 gr->gpc_tpc_mask[gpc_index] =
652 g->ops.gr.get_gpc_tpc_mask(g, gpc_index);
653 }
654
655 g->ops.gr.bundle_cb_defaults(g);
656 g->ops.gr.cb_size_default(g);
657 g->ops.gr.calc_global_ctx_buffer_size(g);
658 err = g->ops.gr.init_fs_state(g);
659 if (err)
660 goto cleanup;
661 return 0;
662cleanup:
663 nvgpu_err(g, "out of memory");
664
665 nvgpu_kfree(g, gr->gpc_tpc_count);
666 gr->gpc_tpc_count = NULL;
667
668 nvgpu_kfree(g, gr->gpc_tpc_mask);
669 gr->gpc_tpc_mask = NULL;
670
671 return err;
672}
673
674int vgpu_gr_bind_ctxsw_zcull(struct gk20a *g, struct gr_gk20a *gr,
675 struct channel_gk20a *c, u64 zcull_va,
676 u32 mode)
677{
678 struct tegra_vgpu_cmd_msg msg;
679 struct tegra_vgpu_zcull_bind_params *p = &msg.params.zcull_bind;
680 int err;
681
682 gk20a_dbg_fn("");
683
684 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_BIND_ZCULL;
685 msg.handle = vgpu_get_handle(g);
686 p->handle = c->virt_ctx;
687 p->zcull_va = zcull_va;
688 p->mode = mode;
689 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
690
691 return (err || msg.ret) ? -ENOMEM : 0;
692}
693
694int vgpu_gr_get_zcull_info(struct gk20a *g, struct gr_gk20a *gr,
695 struct gr_zcull_info *zcull_params)
696{
697 struct tegra_vgpu_cmd_msg msg;
698 struct tegra_vgpu_zcull_info_params *p = &msg.params.zcull_info;
699 int err;
700
701 gk20a_dbg_fn("");
702
703 msg.cmd = TEGRA_VGPU_CMD_GET_ZCULL_INFO;
704 msg.handle = vgpu_get_handle(g);
705 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
706 if (err || msg.ret)
707 return -ENOMEM;
708
709 zcull_params->width_align_pixels = p->width_align_pixels;
710 zcull_params->height_align_pixels = p->height_align_pixels;
711 zcull_params->pixel_squares_by_aliquots = p->pixel_squares_by_aliquots;
712 zcull_params->aliquot_total = p->aliquot_total;
713 zcull_params->region_byte_multiplier = p->region_byte_multiplier;
714 zcull_params->region_header_size = p->region_header_size;
715 zcull_params->subregion_header_size = p->subregion_header_size;
716 zcull_params->subregion_width_align_pixels =
717 p->subregion_width_align_pixels;
718 zcull_params->subregion_height_align_pixels =
719 p->subregion_height_align_pixels;
720 zcull_params->subregion_count = p->subregion_count;
721
722 return 0;
723}
724
725u32 vgpu_gr_get_gpc_tpc_mask(struct gk20a *g, u32 gpc_index)
726{
727 struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
728
729 return priv->constants.gpc_tpc_mask[gpc_index];
730}
731
732u32 vgpu_gr_get_max_fbps_count(struct gk20a *g)
733{
734 struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
735
736 gk20a_dbg_fn("");
737
738 return priv->constants.num_fbps;
739}
740
741u32 vgpu_gr_get_fbp_en_mask(struct gk20a *g)
742{
743 struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
744
745 gk20a_dbg_fn("");
746
747 return priv->constants.fbp_en_mask;
748}
749
750u32 vgpu_gr_get_max_ltc_per_fbp(struct gk20a *g)
751{
752 struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
753
754 gk20a_dbg_fn("");
755
756 return priv->constants.ltc_per_fbp;
757}
758
759u32 vgpu_gr_get_max_lts_per_ltc(struct gk20a *g)
760{
761 struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
762
763 gk20a_dbg_fn("");
764
765 return priv->constants.max_lts_per_ltc;
766}
767
768u32 *vgpu_gr_rop_l2_en_mask(struct gk20a *g)
769{
 770	/* no one uses it yet */
771 return NULL;
772}
773
774int vgpu_gr_add_zbc(struct gk20a *g, struct gr_gk20a *gr,
775 struct zbc_entry *zbc_val)
776{
777 struct tegra_vgpu_cmd_msg msg = {0};
778 struct tegra_vgpu_zbc_set_table_params *p = &msg.params.zbc_set_table;
779 int err;
780
781 gk20a_dbg_fn("");
782
783 msg.cmd = TEGRA_VGPU_CMD_ZBC_SET_TABLE;
784 msg.handle = vgpu_get_handle(g);
785
786 p->type = zbc_val->type;
787 p->format = zbc_val->format;
788 switch (p->type) {
789 case GK20A_ZBC_TYPE_COLOR:
790 memcpy(p->color_ds, zbc_val->color_ds, sizeof(p->color_ds));
791 memcpy(p->color_l2, zbc_val->color_l2, sizeof(p->color_l2));
792 break;
793 case GK20A_ZBC_TYPE_DEPTH:
794 p->depth = zbc_val->depth;
795 break;
796 default:
797 return -EINVAL;
798 }
799
800 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
801
802 return (err || msg.ret) ? -ENOMEM : 0;
803}
804
805int vgpu_gr_query_zbc(struct gk20a *g, struct gr_gk20a *gr,
806 struct zbc_query_params *query_params)
807{
808 struct tegra_vgpu_cmd_msg msg = {0};
809 struct tegra_vgpu_zbc_query_table_params *p =
810 &msg.params.zbc_query_table;
811 int err;
812
813 gk20a_dbg_fn("");
814
815 msg.cmd = TEGRA_VGPU_CMD_ZBC_QUERY_TABLE;
816 msg.handle = vgpu_get_handle(g);
817
818 p->type = query_params->type;
819 p->index_size = query_params->index_size;
820
821 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
822 if (err || msg.ret)
823 return -ENOMEM;
824
825 switch (query_params->type) {
826 case GK20A_ZBC_TYPE_COLOR:
827 memcpy(query_params->color_ds, p->color_ds,
828 sizeof(query_params->color_ds));
829 memcpy(query_params->color_l2, p->color_l2,
830 sizeof(query_params->color_l2));
831 break;
832 case GK20A_ZBC_TYPE_DEPTH:
833 query_params->depth = p->depth;
834 break;
835 case GK20A_ZBC_TYPE_INVALID:
836 query_params->index_size = p->index_size;
837 break;
838 default:
839 return -EINVAL;
840 }
841 query_params->ref_cnt = p->ref_cnt;
842 query_params->format = p->format;
843
844 return 0;
845}
846
847static void vgpu_remove_gr_support(struct gr_gk20a *gr)
848{
849 gk20a_dbg_fn("");
850
851 gk20a_comptag_allocator_destroy(gr->g, &gr->comp_tags);
852
853 nvgpu_kfree(gr->g, gr->sm_error_states);
854 gr->sm_error_states = NULL;
855
856 nvgpu_kfree(gr->g, gr->gpc_tpc_mask);
857 gr->gpc_tpc_mask = NULL;
858
859 nvgpu_kfree(gr->g, gr->sm_to_cluster);
860 gr->sm_to_cluster = NULL;
861
862 nvgpu_kfree(gr->g, gr->gpc_tpc_count);
863 gr->gpc_tpc_count = NULL;
864}
865
866static int vgpu_gr_init_gr_setup_sw(struct gk20a *g)
867{
868 struct gr_gk20a *gr = &g->gr;
869 int err;
870
871 gk20a_dbg_fn("");
872
873 if (gr->sw_ready) {
874 gk20a_dbg_fn("skip init");
875 return 0;
876 }
877
878 gr->g = g;
879
880#if defined(CONFIG_GK20A_CYCLE_STATS)
881 nvgpu_mutex_init(&g->gr.cs_lock);
882#endif
883
884 err = vgpu_gr_init_gr_config(g, gr);
885 if (err)
886 goto clean_up;
887
888 err = g->ops.gr.init_ctx_state(g);
889 if (err)
890 goto clean_up;
891
892 err = g->ops.ltc.init_comptags(g, gr);
893 if (err)
894 goto clean_up;
895
896 err = vgpu_gr_alloc_global_ctx_buffers(g);
897 if (err)
898 goto clean_up;
899
900 nvgpu_mutex_init(&gr->ctx_mutex);
901
902 gr->sm_error_states = nvgpu_kzalloc(g,
903 sizeof(struct nvgpu_gr_sm_error_state) *
904 gr->no_of_sm);
905 if (!gr->sm_error_states) {
906 err = -ENOMEM;
907 goto clean_up;
908 }
909
910 gr->remove_support = vgpu_remove_gr_support;
911 gr->sw_ready = true;
912
913 gk20a_dbg_fn("done");
914 return 0;
915
916clean_up:
917 nvgpu_err(g, "fail");
918 vgpu_remove_gr_support(gr);
919 return err;
920}
921
922int vgpu_init_gr_support(struct gk20a *g)
923{
924 gk20a_dbg_fn("");
925
926 return vgpu_gr_init_gr_setup_sw(g);
927}
928
929int vgpu_gr_isr(struct gk20a *g, struct tegra_vgpu_gr_intr_info *info)
930{
931 struct fifo_gk20a *f = &g->fifo;
932 struct channel_gk20a *ch = gk20a_channel_get(&f->channel[info->chid]);
933
934 gk20a_dbg_fn("");
935 if (!ch)
936 return 0;
937
938 if (info->type != TEGRA_VGPU_GR_INTR_NOTIFY &&
939 info->type != TEGRA_VGPU_GR_INTR_SEMAPHORE)
940 nvgpu_err(g, "gr intr (%d) on ch %u", info->type, info->chid);
941
942 switch (info->type) {
943 case TEGRA_VGPU_GR_INTR_NOTIFY:
944 nvgpu_cond_broadcast_interruptible(&ch->notifier_wq);
945 break;
946 case TEGRA_VGPU_GR_INTR_SEMAPHORE:
947 nvgpu_cond_broadcast_interruptible(&ch->semaphore_wq);
948 break;
949 case TEGRA_VGPU_GR_INTR_SEMAPHORE_TIMEOUT:
950 gk20a_set_error_notifier(ch,
951 NVGPU_CHANNEL_GR_SEMAPHORE_TIMEOUT);
952 break;
953 case TEGRA_VGPU_GR_INTR_ILLEGAL_NOTIFY:
954 gk20a_set_error_notifier(ch,
955 NVGPU_CHANNEL_GR_ILLEGAL_NOTIFY);
956 case TEGRA_VGPU_GR_INTR_ILLEGAL_METHOD:
957 break;
958 case TEGRA_VGPU_GR_INTR_ILLEGAL_CLASS:
959 gk20a_set_error_notifier(ch,
960 NVGPU_CHANNEL_GR_ERROR_SW_NOTIFY);
961 break;
962 case TEGRA_VGPU_GR_INTR_FECS_ERROR:
963 break;
964 case TEGRA_VGPU_GR_INTR_CLASS_ERROR:
965 gk20a_set_error_notifier(ch,
966 NVGPU_CHANNEL_GR_ERROR_SW_NOTIFY);
967 break;
968 case TEGRA_VGPU_GR_INTR_FIRMWARE_METHOD:
969 gk20a_set_error_notifier(ch,
970 NVGPU_CHANNEL_GR_ERROR_SW_NOTIFY);
971 break;
972 case TEGRA_VGPU_GR_INTR_EXCEPTION:
973 gk20a_set_error_notifier(ch,
974 NVGPU_CHANNEL_GR_ERROR_SW_NOTIFY);
975 break;
976 case TEGRA_VGPU_GR_INTR_SM_EXCEPTION:
977 gk20a_dbg_gpu_post_events(ch);
978 break;
979 default:
980 WARN_ON(1);
981 break;
982 }
983
984 gk20a_channel_put(ch);
985 return 0;
986}
987
988int vgpu_gr_nonstall_isr(struct gk20a *g,
989 struct tegra_vgpu_gr_nonstall_intr_info *info)
990{
991 gk20a_dbg_fn("");
992
993 switch (info->type) {
994 case TEGRA_VGPU_GR_NONSTALL_INTR_SEMAPHORE:
995 gk20a_channel_semaphore_wakeup(g, true);
996 break;
997 default:
998 WARN_ON(1);
999 break;
1000 }
1001
1002 return 0;
1003}
1004
1005int vgpu_gr_set_sm_debug_mode(struct gk20a *g,
1006 struct channel_gk20a *ch, u64 sms, bool enable)
1007{
1008 struct tegra_vgpu_cmd_msg msg;
1009 struct tegra_vgpu_sm_debug_mode *p = &msg.params.sm_debug_mode;
1010 int err;
1011
1012 gk20a_dbg_fn("");
1013
1014 msg.cmd = TEGRA_VGPU_CMD_SET_SM_DEBUG_MODE;
1015 msg.handle = vgpu_get_handle(g);
1016 p->handle = ch->virt_ctx;
1017 p->sms = sms;
1018 p->enable = (u32)enable;
1019 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
1020 WARN_ON(err || msg.ret);
1021
1022 return err ? err : msg.ret;
1023}
1024
1025int vgpu_gr_update_smpc_ctxsw_mode(struct gk20a *g,
1026 struct channel_gk20a *ch, bool enable)
1027{
1028 struct tegra_vgpu_cmd_msg msg;
1029 struct tegra_vgpu_channel_set_ctxsw_mode *p = &msg.params.set_ctxsw_mode;
1030 int err;
1031
1032 gk20a_dbg_fn("");
1033
1034 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_SET_SMPC_CTXSW_MODE;
1035 msg.handle = vgpu_get_handle(g);
1036 p->handle = ch->virt_ctx;
1037
1038 if (enable)
1039 p->mode = TEGRA_VGPU_CTXSW_MODE_CTXSW;
1040 else
1041 p->mode = TEGRA_VGPU_CTXSW_MODE_NO_CTXSW;
1042
1043 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
1044 WARN_ON(err || msg.ret);
1045
1046 return err ? err : msg.ret;
1047}
1048
1049int vgpu_gr_update_hwpm_ctxsw_mode(struct gk20a *g,
1050 struct channel_gk20a *ch, bool enable)
1051{
1052 struct channel_ctx_gk20a *ch_ctx = &ch->ch_ctx;
1053 struct pm_ctx_desc *pm_ctx = &ch_ctx->pm_ctx;
1054 struct tegra_vgpu_cmd_msg msg;
1055 struct tegra_vgpu_channel_set_ctxsw_mode *p = &msg.params.set_ctxsw_mode;
1056 int err;
1057
1058 gk20a_dbg_fn("");
1059
1060 if (enable) {
1061 p->mode = TEGRA_VGPU_CTXSW_MODE_CTXSW;
1062
1063 /* Allocate buffer if necessary */
1064 if (pm_ctx->mem.gpu_va == 0) {
1065 pm_ctx->mem.gpu_va = __nvgpu_vm_alloc_va(ch->vm,
1066 g->gr.ctx_vars.pm_ctxsw_image_size,
1067 gmmu_page_size_kernel);
1068
1069 if (!pm_ctx->mem.gpu_va)
1070 return -ENOMEM;
1071 pm_ctx->mem.size = g->gr.ctx_vars.pm_ctxsw_image_size;
1072 }
1073 } else
1074 p->mode = TEGRA_VGPU_CTXSW_MODE_NO_CTXSW;
1075
1076 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_SET_HWPM_CTXSW_MODE;
1077 msg.handle = vgpu_get_handle(g);
1078 p->handle = ch->virt_ctx;
1079 p->gpu_va = pm_ctx->mem.gpu_va;
1080
1081 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
1082 WARN_ON(err || msg.ret);
1083
1084 return err ? err : msg.ret;
1085}
1086
1087int vgpu_gr_clear_sm_error_state(struct gk20a *g,
1088 struct channel_gk20a *ch, u32 sm_id)
1089{
1090 struct gr_gk20a *gr = &g->gr;
1091 struct tegra_vgpu_cmd_msg msg;
1092 struct tegra_vgpu_clear_sm_error_state *p =
1093 &msg.params.clear_sm_error_state;
1094 int err;
1095
1096 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
1097 msg.cmd = TEGRA_VGPU_CMD_CLEAR_SM_ERROR_STATE;
1098 msg.handle = vgpu_get_handle(g);
1099 p->handle = ch->virt_ctx;
1100 p->sm_id = sm_id;
1101
1102 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
1103 WARN_ON(err || msg.ret);
1104
1105 memset(&gr->sm_error_states[sm_id], 0, sizeof(*gr->sm_error_states));
1106 nvgpu_mutex_release(&g->dbg_sessions_lock);
1107
1108 return err ? err : msg.ret;
1109
1110
1111 return 0;
1112}
1113
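/*
 * Suspend or resume every context attached to a debug session. The channel
 * ID list is handed to the server through the shared out-of-band buffer as a
 * u16 array; the server reports back the resident context (if any), whose fd
 * is returned through ctx_resident_ch_fd.
 */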
1114static int vgpu_gr_suspend_resume_contexts(struct gk20a *g,
1115 struct dbg_session_gk20a *dbg_s,
1116 int *ctx_resident_ch_fd, u32 cmd)
1117{
1118 struct dbg_session_channel_data *ch_data;
1119 struct tegra_vgpu_cmd_msg msg;
1120 struct tegra_vgpu_suspend_resume_contexts *p;
1121 size_t n;
1122 int channel_fd = -1;
1123 int err = 0;
1124 void *handle = NULL;
1125 u16 *oob;
1126 size_t oob_size;
1127
1128 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
1129 nvgpu_mutex_acquire(&dbg_s->ch_list_lock);
1130
1131 handle = tegra_gr_comm_oob_get_ptr(TEGRA_GR_COMM_CTX_CLIENT,
1132 tegra_gr_comm_get_server_vmid(), TEGRA_VGPU_QUEUE_CMD,
1133 (void **)&oob, &oob_size);
1134 if (!handle) {
1135 err = -EINVAL;
1136 goto done;
1137 }
1138
1139 n = 0;
1140 list_for_each_entry(ch_data, &dbg_s->ch_list, ch_entry)
1141 n++;
1142
1143 if (oob_size < n * sizeof(u16)) {
1144 err = -ENOMEM;
1145 goto done;
1146 }
1147
1148 msg.cmd = cmd;
1149 msg.handle = vgpu_get_handle(g);
1150 p = &msg.params.suspend_contexts;
1151 p->num_channels = n;
1152 n = 0;
1153 list_for_each_entry(ch_data, &dbg_s->ch_list, ch_entry)
1154 oob[n++] = (u16)ch_data->chid;
1155
1156 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
1157 if (err || msg.ret) {
1158 err = -ENOMEM;
1159 goto done;
1160 }
1161
1162 if (p->resident_chid != (u16)~0) {
1163 list_for_each_entry(ch_data, &dbg_s->ch_list, ch_entry) {
1164 if (ch_data->chid == p->resident_chid) {
1165 channel_fd = ch_data->channel_fd;
1166 break;
1167 }
1168 }
1169 }
1170
1171done:
1172 if (handle)
1173 tegra_gr_comm_oob_put_ptr(handle);
1174 nvgpu_mutex_release(&dbg_s->ch_list_lock);
1175 nvgpu_mutex_release(&g->dbg_sessions_lock);
1176 *ctx_resident_ch_fd = channel_fd;
1177 return err;
1178}
1179
1180int vgpu_gr_suspend_contexts(struct gk20a *g,
1181 struct dbg_session_gk20a *dbg_s,
1182 int *ctx_resident_ch_fd)
1183{
1184 return vgpu_gr_suspend_resume_contexts(g, dbg_s,
1185 ctx_resident_ch_fd, TEGRA_VGPU_CMD_SUSPEND_CONTEXTS);
1186}
1187
1188int vgpu_gr_resume_contexts(struct gk20a *g,
1189 struct dbg_session_gk20a *dbg_s,
1190 int *ctx_resident_ch_fd)
1191{
1192 return vgpu_gr_suspend_resume_contexts(g, dbg_s,
1193 ctx_resident_ch_fd, TEGRA_VGPU_CMD_RESUME_CONTEXTS);
1194}
1195
1196void vgpu_gr_handle_sm_esr_event(struct gk20a *g,
1197 struct tegra_vgpu_sm_esr_info *info)
1198{
1199 struct nvgpu_gr_sm_error_state *sm_error_states;
1200
1201 if (info->sm_id >= g->gr.no_of_sm) {
 1202		nvgpu_err(g, "invalid sm_id %d / %d",
1203 info->sm_id, g->gr.no_of_sm);
1204 return;
1205 }
1206
1207 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
1208
1209 sm_error_states = &g->gr.sm_error_states[info->sm_id];
1210
1211 sm_error_states->hww_global_esr = info->hww_global_esr;
1212 sm_error_states->hww_warp_esr = info->hww_warp_esr;
1213 sm_error_states->hww_warp_esr_pc = info->hww_warp_esr_pc;
1214 sm_error_states->hww_global_esr_report_mask =
1215 info->hww_global_esr_report_mask;
1216 sm_error_states->hww_warp_esr_report_mask =
1217 info->hww_warp_esr_report_mask;
1218
1219 nvgpu_mutex_release(&g->dbg_sessions_lock);
1220}
diff --git a/drivers/gpu/nvgpu/vgpu/gr_vgpu.h b/drivers/gpu/nvgpu/vgpu/gr_vgpu.h
deleted file mode 100644
index b43e334a..00000000
--- a/drivers/gpu/nvgpu/vgpu/gr_vgpu.h
+++ /dev/null
@@ -1,70 +0,0 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef _GR_VGPU_H_
24#define _GR_VGPU_H_
25
26#include <nvgpu/types.h>
27
28struct gk20a;
29struct channel_gk20a;
30struct gr_gk20a;
31struct gr_zcull_info;
32struct zbc_entry;
33struct zbc_query_params;
34struct dbg_session_gk20a;
35
36void vgpu_gr_detect_sm_arch(struct gk20a *g);
37void vgpu_gr_free_channel_ctx(struct channel_gk20a *c, bool is_tsg);
38int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags);
39int vgpu_gr_bind_ctxsw_zcull(struct gk20a *g, struct gr_gk20a *gr,
40 struct channel_gk20a *c, u64 zcull_va,
41 u32 mode);
42int vgpu_gr_get_zcull_info(struct gk20a *g, struct gr_gk20a *gr,
43 struct gr_zcull_info *zcull_params);
44u32 vgpu_gr_get_gpc_tpc_mask(struct gk20a *g, u32 gpc_index);
45u32 vgpu_gr_get_max_fbps_count(struct gk20a *g);
46u32 vgpu_gr_get_fbp_en_mask(struct gk20a *g);
47u32 vgpu_gr_get_max_ltc_per_fbp(struct gk20a *g);
48u32 vgpu_gr_get_max_lts_per_ltc(struct gk20a *g);
49u32 *vgpu_gr_rop_l2_en_mask(struct gk20a *g);
50int vgpu_gr_add_zbc(struct gk20a *g, struct gr_gk20a *gr,
51 struct zbc_entry *zbc_val);
52int vgpu_gr_query_zbc(struct gk20a *g, struct gr_gk20a *gr,
53 struct zbc_query_params *query_params);
54int vgpu_gr_set_sm_debug_mode(struct gk20a *g,
55 struct channel_gk20a *ch, u64 sms, bool enable);
56int vgpu_gr_update_smpc_ctxsw_mode(struct gk20a *g,
57 struct channel_gk20a *ch, bool enable);
58int vgpu_gr_update_hwpm_ctxsw_mode(struct gk20a *g,
59 struct channel_gk20a *ch, bool enable);
60int vgpu_gr_clear_sm_error_state(struct gk20a *g,
61 struct channel_gk20a *ch, u32 sm_id);
62int vgpu_gr_suspend_contexts(struct gk20a *g,
63 struct dbg_session_gk20a *dbg_s,
64 int *ctx_resident_ch_fd);
65int vgpu_gr_resume_contexts(struct gk20a *g,
66 struct dbg_session_gk20a *dbg_s,
67 int *ctx_resident_ch_fd);
68int vgpu_gr_commit_inst(struct channel_gk20a *c, u64 gpu_va);
69
70#endif
diff --git a/drivers/gpu/nvgpu/vgpu/gv11b/platform_gv11b_vgpu_tegra.c b/drivers/gpu/nvgpu/vgpu/gv11b/platform_gv11b_vgpu_tegra.c
deleted file mode 100644
index fea473a7..00000000
--- a/drivers/gpu/nvgpu/vgpu/gv11b/platform_gv11b_vgpu_tegra.c
+++ /dev/null
@@ -1,105 +0,0 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#include "gk20a/gk20a.h"
24#include "vgpu/clk_vgpu.h"
25#include "common/linux/platform_gk20a.h"
26#include "common/linux/os_linux.h"
27
28#include <nvgpu/nvhost.h>
29#include <nvgpu/nvhost_t19x.h>
30
31#include <linux/platform_device.h>
32
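/*
 * Probe for the virtual gv11b device: map the "usermode" register aperture,
 * query the syncpoint shim aperture from nvhost (when
 * CONFIG_TEGRA_GK20A_NVHOST is enabled) and set up vgpu clock support.
 */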
33static int gv11b_vgpu_probe(struct device *dev)
34{
35 struct platform_device *pdev = to_platform_device(dev);
36 struct gk20a_platform *platform = dev_get_drvdata(dev);
37 struct resource *r;
38 void __iomem *regs;
39 struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(platform->g);
40 struct gk20a *g = platform->g;
41 int ret;
42
43 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "usermode");
44 if (!r) {
45 dev_err(dev, "failed to get usermode regs\n");
46 return -ENXIO;
47 }
48 regs = devm_ioremap_resource(dev, r);
49 if (IS_ERR(regs)) {
50 dev_err(dev, "failed to map usermode regs\n");
51 return PTR_ERR(regs);
52 }
53 l->t19x.usermode_regs = regs;
54
55#ifdef CONFIG_TEGRA_GK20A_NVHOST
56 ret = nvgpu_get_nvhost_dev(g);
57 if (ret) {
58 l->t19x.usermode_regs = NULL;
59 return ret;
60 }
61
62 ret = nvgpu_nvhost_syncpt_unit_interface_get_aperture(g->nvhost_dev,
63 &g->syncpt_unit_base,
64 &g->syncpt_unit_size);
65 if (ret) {
66 dev_err(dev, "Failed to get syncpt interface");
67 return -ENOSYS;
68 }
69 g->syncpt_size = nvgpu_nvhost_syncpt_unit_interface_get_byte_offset(1);
70 nvgpu_info(g, "syncpt_unit_base %llx syncpt_unit_size %zx size %x\n",
71 g->syncpt_unit_base, g->syncpt_unit_size, g->syncpt_size);
72#endif
73 vgpu_init_clk_support(platform->g);
74
75 return 0;
76}
77
78struct gk20a_platform gv11b_vgpu_tegra_platform = {
79 .has_syncpoints = true,
80 .aggressive_sync_destroy_thresh = 64,
81
82 /* power management configuration */
83 .can_railgate_init = false,
84 .can_elpg_init = false,
85 .enable_slcg = false,
86 .enable_blcg = false,
87 .enable_elcg = false,
88 .enable_elpg = false,
89 .enable_aelpg = false,
90 .can_slcg = false,
91 .can_blcg = false,
92 .can_elcg = false,
93
94 .ch_wdt_timeout_ms = 5000,
95
96 .probe = gv11b_vgpu_probe,
97
98 .clk_round_rate = vgpu_clk_round_rate,
99 .get_clk_freqs = vgpu_clk_get_freqs,
100
101 /* frequency scaling configuration */
102 .devfreq_governor = "userspace",
103
104 .virtual_dev = true,
105};
diff --git a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_fifo_gv11b.c b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_fifo_gv11b.c
deleted file mode 100644
index ae9d52a7..00000000
--- a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_fifo_gv11b.c
+++ /dev/null
@@ -1,117 +0,0 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#include <gk20a/gk20a.h>
24
25#include "vgpu/vgpu.h"
26#include "gv11b/fifo_gv11b.h"
27#include <nvgpu/nvhost_t19x.h>
28
29#include <linux/tegra_vgpu.h>
30
31#ifdef CONFIG_TEGRA_GK20A_NVHOST
32int vgpu_gv11b_fifo_alloc_syncpt_buf(struct channel_gk20a *c,
33 u32 syncpt_id, struct nvgpu_mem *syncpt_buf)
34{
35 int err;
36 struct gk20a *g = c->g;
37 struct vm_gk20a *vm = c->vm;
38 struct tegra_vgpu_cmd_msg msg = {};
39 struct tegra_vgpu_map_syncpt_params *p = &msg.params.t19x.map_syncpt;
40
41 /*
 42	 * Add an RO map for the complete sync point shim range in the vm.
 43	 * All channels sharing the same vm share the same RO mapping.
 44	 * Create an RW map for the current channel's sync point.
45 */
46 if (!vm->syncpt_ro_map_gpu_va) {
47 vm->syncpt_ro_map_gpu_va = __nvgpu_vm_alloc_va(vm,
48 g->syncpt_unit_size,
49 gmmu_page_size_kernel);
50 if (!vm->syncpt_ro_map_gpu_va) {
51 nvgpu_err(g, "allocating read-only va space failed");
52 return -ENOMEM;
53 }
54
55 msg.cmd = TEGRA_VGPU_CMD_MAP_SYNCPT;
56 msg.handle = vgpu_get_handle(g);
57 p->as_handle = c->vm->handle;
58 p->gpu_va = vm->syncpt_ro_map_gpu_va;
59 p->len = g->syncpt_unit_size;
60 p->offset = 0;
61 p->prot = TEGRA_VGPU_MAP_PROT_READ_ONLY;
62 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
63 err = err ? err : msg.ret;
64 if (err) {
65 nvgpu_err(g,
66 "mapping read-only va space failed err %d",
67 err);
68 __nvgpu_vm_free_va(c->vm, vm->syncpt_ro_map_gpu_va,
69 gmmu_page_size_kernel);
70 vm->syncpt_ro_map_gpu_va = 0;
71 return err;
72 }
73 }
74
75 syncpt_buf->gpu_va = __nvgpu_vm_alloc_va(c->vm, g->syncpt_size,
76 gmmu_page_size_kernel);
77 if (!syncpt_buf->gpu_va) {
78 nvgpu_err(g, "allocating syncpt va space failed");
79 return -ENOMEM;
80 }
81
82 msg.cmd = TEGRA_VGPU_CMD_MAP_SYNCPT;
83 msg.handle = vgpu_get_handle(g);
84 p->as_handle = c->vm->handle;
85 p->gpu_va = syncpt_buf->gpu_va;
86 p->len = g->syncpt_size;
87 p->offset =
88 nvgpu_nvhost_syncpt_unit_interface_get_byte_offset(syncpt_id);
89 p->prot = TEGRA_VGPU_MAP_PROT_NONE;
90 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
91 err = err ? err : msg.ret;
92 if (err) {
93 nvgpu_err(g, "mapping syncpt va space failed err %d", err);
94 __nvgpu_vm_free_va(c->vm, syncpt_buf->gpu_va,
95 gmmu_page_size_kernel);
96 return err;
97 }
98
99 return 0;
100}
101#endif /* CONFIG_TEGRA_GK20A_NVHOST */
102
103int vgpu_gv11b_init_fifo_setup_hw(struct gk20a *g)
104{
105 struct fifo_gk20a *f = &g->fifo;
106 int err;
107
108 err = vgpu_get_attribute(vgpu_get_handle(g),
109 TEGRA_VGPU_ATTRIB_MAX_SUBCTX_COUNT,
110 &f->t19x.max_subctx_count);
111 if (err) {
112 nvgpu_err(g, "get max_subctx_count failed %d", err);
113 return err;
114 }
115
116 return 0;
117}
diff --git a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_fifo_gv11b.h b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_fifo_gv11b.h
deleted file mode 100644
index bea935d3..00000000
--- a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_fifo_gv11b.h
+++ /dev/null
@@ -1,31 +0,0 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef _VGPU_FIFO_GV11B_H_
24#define _VGPU_FIFO_GV11B_H_
25
26struct gk20a;
27
28int vgpu_gv11b_init_fifo_setup_hw(struct gk20a *g);
29int vgpu_gv11b_fifo_alloc_syncpt_buf(struct channel_gk20a *c,
30 u32 syncpt_id, struct nvgpu_mem *syncpt_buf);
31#endif
diff --git a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_gr_gv11b.c b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_gr_gv11b.c
deleted file mode 100644
index 89952221..00000000
--- a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_gr_gv11b.c
+++ /dev/null
@@ -1,41 +0,0 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#include <gk20a/gk20a.h>
24#include <vgpu/gr_vgpu.h>
25
26#include "vgpu_subctx_gv11b.h"
27
28int vgpu_gr_gv11b_commit_inst(struct channel_gk20a *c, u64 gpu_va)
29{
30 int err;
31
32 err = vgpu_gv11b_alloc_subctx_header(c);
33 if (err)
34 return err;
35
36 err = vgpu_gr_commit_inst(c, gpu_va);
37 if (err)
38 vgpu_gv11b_free_subctx_header(c);
39
40 return err;
41}
diff --git a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_gr_gv11b.h b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_gr_gv11b.h
deleted file mode 100644
index 562198ca..00000000
--- a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_gr_gv11b.h
+++ /dev/null
@@ -1,30 +0,0 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef _VGPU_GR_GV11B_H_
24#define _VGPU_GR_GV11B_H_
25
26struct channel_gk20a;
27
28int vgpu_gr_gv11b_commit_inst(struct channel_gk20a *c, u64 gpu_va);
29
30#endif
diff --git a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_gv11b.c b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_gv11b.c
deleted file mode 100644
index feac195e..00000000
--- a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_gv11b.c
+++ /dev/null
@@ -1,37 +0,0 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 */
13
14#include "gk20a/gk20a.h"
15
16#include <nvgpu/enabled.h>
17#include <nvgpu/enabled_t19x.h>
18
19#include "vgpu/vgpu.h"
20#include "vgpu_gv11b.h"
21
22int vgpu_gv11b_init_gpu_characteristics(struct gk20a *g)
23{
24 int err;
25
26 gk20a_dbg_fn("");
27
28 err = vgpu_init_gpu_characteristics(g);
29 if (err) {
30 nvgpu_err(g, "vgpu_init_gpu_characteristics failed, err %d\n", err);
31 return err;
32 }
33
34 __nvgpu_set_enabled(g, NVGPU_SUPPORT_TSG_SUBCONTEXTS, true);
35
36 return 0;
37}
diff --git a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_gv11b.h b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_gv11b.h
deleted file mode 100644
index 9413904b..00000000
--- a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_gv11b.h
+++ /dev/null
@@ -1,21 +0,0 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 */
13
14#ifndef _VGPU_GV11B_H_
15#define _VGPU_GV11B_H_
16
17struct gk20a;
18
19int vgpu_gv11b_init_gpu_characteristics(struct gk20a *g);
20
21#endif
diff --git a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_hal_gv11b.c b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_hal_gv11b.c
deleted file mode 100644
index 17d6f049..00000000
--- a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_hal_gv11b.c
+++ /dev/null
@@ -1,642 +0,0 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#include <gk20a/gk20a.h>
24#include <gv11b/hal_gv11b.h>
25#include <vgpu/vgpu.h>
26#include <vgpu/fifo_vgpu.h>
27#include <vgpu/gr_vgpu.h>
28#include <vgpu/ltc_vgpu.h>
29#include <vgpu/mm_vgpu.h>
30#include <vgpu/dbg_vgpu.h>
31#include <vgpu/fecs_trace_vgpu.h>
32#include <vgpu/css_vgpu.h>
33#include <vgpu/vgpu_t19x.h>
34#include <vgpu/gm20b/vgpu_gr_gm20b.h>
35#include <vgpu/gp10b/vgpu_mm_gp10b.h>
36#include <vgpu/gp10b/vgpu_gr_gp10b.h>
37
38#include <gk20a/fb_gk20a.h>
39#include <gk20a/flcn_gk20a.h>
40#include <gk20a/bus_gk20a.h>
41#include <gk20a/mc_gk20a.h>
42
43#include <gm20b/gr_gm20b.h>
44#include <gm20b/fb_gm20b.h>
45#include <gm20b/fifo_gm20b.h>
46#include <gm20b/pmu_gm20b.h>
47#include <gm20b/mm_gm20b.h>
48#include <gm20b/acr_gm20b.h>
49#include <gm20b/ltc_gm20b.h>
50
51#include <gp10b/fb_gp10b.h>
52#include <gp10b/pmu_gp10b.h>
53#include <gp10b/mm_gp10b.h>
54#include <gp10b/mc_gp10b.h>
55#include <gp10b/ce_gp10b.h>
56#include <gp10b/fifo_gp10b.h>
57#include <gp10b/therm_gp10b.h>
58#include <gp10b/priv_ring_gp10b.h>
59#include <gp10b/ltc_gp10b.h>
60
61#include <gp106/pmu_gp106.h>
62#include <gp106/acr_gp106.h>
63
64#include <gv11b/fb_gv11b.h>
65#include <gv11b/pmu_gv11b.h>
66#include <gv11b/acr_gv11b.h>
67#include <gv11b/mm_gv11b.h>
68#include <gv11b/mc_gv11b.h>
69#include <gv11b/ce_gv11b.h>
70#include <gv11b/fifo_gv11b.h>
71#include <gv11b/therm_gv11b.h>
72#include <gv11b/regops_gv11b.h>
73#include <gv11b/gr_ctx_gv11b.h>
74#include <gv11b/ltc_gv11b.h>
75#include <gv11b/gv11b_gating_reglist.h>
76
77#include <gv100/gr_gv100.h>
78
79#include <nvgpu/enabled.h>
80
81#include "vgpu_gv11b.h"
82#include "vgpu_gr_gv11b.h"
83#include "vgpu_fifo_gv11b.h"
84#include "vgpu_subctx_gv11b.h"
85#include "vgpu_tsg_gv11b.h"
86
87#include <nvgpu/hw/gv11b/hw_fuse_gv11b.h>
88#include <nvgpu/hw/gv11b/hw_fifo_gv11b.h>
89#include <nvgpu/hw/gv11b/hw_ram_gv11b.h>
90#include <nvgpu/hw/gv11b/hw_top_gv11b.h>
91#include <nvgpu/hw/gv11b/hw_pwr_gv11b.h>
92
93static const struct gpu_ops vgpu_gv11b_ops = {
94 .ltc = {
95 .determine_L2_size_bytes = vgpu_determine_L2_size_bytes,
96 .set_zbc_s_entry = gv11b_ltc_set_zbc_stencil_entry,
97 .set_zbc_color_entry = gm20b_ltc_set_zbc_color_entry,
98 .set_zbc_depth_entry = gm20b_ltc_set_zbc_depth_entry,
99 .init_cbc = NULL,
100 .init_fs_state = vgpu_ltc_init_fs_state,
101 .init_comptags = vgpu_ltc_init_comptags,
102 .cbc_ctrl = NULL,
103 .isr = gv11b_ltc_isr,
104 .cbc_fix_config = gv11b_ltc_cbc_fix_config,
105 .flush = gm20b_flush_ltc,
106 .set_enabled = gp10b_ltc_set_enabled,
107 },
108 .ce2 = {
109 .isr_stall = gv11b_ce_isr,
110 .isr_nonstall = gp10b_ce_nonstall_isr,
111 .get_num_pce = vgpu_ce_get_num_pce,
112 },
113 .gr = {
114 .init_gpc_mmu = gr_gv11b_init_gpc_mmu,
115 .bundle_cb_defaults = gr_gv11b_bundle_cb_defaults,
116 .cb_size_default = gr_gv11b_cb_size_default,
117 .calc_global_ctx_buffer_size =
118 gr_gv11b_calc_global_ctx_buffer_size,
119 .commit_global_attrib_cb = gr_gv11b_commit_global_attrib_cb,
120 .commit_global_bundle_cb = gr_gp10b_commit_global_bundle_cb,
121 .commit_global_cb_manager = gr_gp10b_commit_global_cb_manager,
122 .commit_global_pagepool = gr_gp10b_commit_global_pagepool,
123 .handle_sw_method = gr_gv11b_handle_sw_method,
124 .set_alpha_circular_buffer_size =
125 gr_gv11b_set_alpha_circular_buffer_size,
126 .set_circular_buffer_size = gr_gv11b_set_circular_buffer_size,
127 .enable_hww_exceptions = gr_gv11b_enable_hww_exceptions,
128 .is_valid_class = gr_gv11b_is_valid_class,
129 .is_valid_gfx_class = gr_gv11b_is_valid_gfx_class,
130 .is_valid_compute_class = gr_gv11b_is_valid_compute_class,
131 .get_sm_dsm_perf_regs = gv11b_gr_get_sm_dsm_perf_regs,
132 .get_sm_dsm_perf_ctrl_regs = gv11b_gr_get_sm_dsm_perf_ctrl_regs,
133 .init_fs_state = vgpu_gm20b_init_fs_state,
134 .set_hww_esr_report_mask = gv11b_gr_set_hww_esr_report_mask,
135 .falcon_load_ucode = gr_gm20b_load_ctxsw_ucode_segments,
136 .load_ctxsw_ucode = gr_gk20a_load_ctxsw_ucode,
137 .set_gpc_tpc_mask = gr_gv11b_set_gpc_tpc_mask,
138 .get_gpc_tpc_mask = vgpu_gr_get_gpc_tpc_mask,
139 .free_channel_ctx = vgpu_gr_free_channel_ctx,
140 .alloc_obj_ctx = vgpu_gr_alloc_obj_ctx,
141 .bind_ctxsw_zcull = vgpu_gr_bind_ctxsw_zcull,
142 .get_zcull_info = vgpu_gr_get_zcull_info,
143 .is_tpc_addr = gr_gm20b_is_tpc_addr,
144 .get_tpc_num = gr_gm20b_get_tpc_num,
145 .detect_sm_arch = vgpu_gr_detect_sm_arch,
146 .add_zbc_color = gr_gp10b_add_zbc_color,
147 .add_zbc_depth = gr_gp10b_add_zbc_depth,
148 .zbc_set_table = vgpu_gr_add_zbc,
149 .zbc_query_table = vgpu_gr_query_zbc,
150 .pmu_save_zbc = gk20a_pmu_save_zbc,
151 .add_zbc = gr_gk20a_add_zbc,
152 .pagepool_default_size = gr_gv11b_pagepool_default_size,
153 .init_ctx_state = vgpu_gr_gp10b_init_ctx_state,
154 .alloc_gr_ctx = vgpu_gr_gp10b_alloc_gr_ctx,
155 .free_gr_ctx = vgpu_gr_gp10b_free_gr_ctx,
156 .update_ctxsw_preemption_mode =
157 gr_gp10b_update_ctxsw_preemption_mode,
158 .dump_gr_regs = NULL,
159 .update_pc_sampling = gr_gm20b_update_pc_sampling,
160 .get_fbp_en_mask = vgpu_gr_get_fbp_en_mask,
161 .get_max_ltc_per_fbp = vgpu_gr_get_max_ltc_per_fbp,
162 .get_max_lts_per_ltc = vgpu_gr_get_max_lts_per_ltc,
163 .get_rop_l2_en_mask = vgpu_gr_rop_l2_en_mask,
164 .get_max_fbps_count = vgpu_gr_get_max_fbps_count,
165 .init_sm_dsm_reg_info = gv11b_gr_init_sm_dsm_reg_info,
166 .wait_empty = gr_gv11b_wait_empty,
167 .init_cyclestats = vgpu_gr_gm20b_init_cyclestats,
168 .set_sm_debug_mode = vgpu_gr_set_sm_debug_mode,
169 .enable_cde_in_fecs = gr_gm20b_enable_cde_in_fecs,
170 .bpt_reg_info = gv11b_gr_bpt_reg_info,
171 .get_access_map = gr_gv11b_get_access_map,
172 .handle_fecs_error = gr_gv11b_handle_fecs_error,
173 .handle_sm_exception = gr_gk20a_handle_sm_exception,
174 .handle_tex_exception = gr_gv11b_handle_tex_exception,
175 .enable_gpc_exceptions = gr_gv11b_enable_gpc_exceptions,
176 .enable_exceptions = gr_gv11b_enable_exceptions,
177 .get_lrf_tex_ltc_dram_override = get_ecc_override_val,
178 .update_smpc_ctxsw_mode = vgpu_gr_update_smpc_ctxsw_mode,
179 .update_hwpm_ctxsw_mode = vgpu_gr_update_hwpm_ctxsw_mode,
180 .record_sm_error_state = gv11b_gr_record_sm_error_state,
181 .update_sm_error_state = gv11b_gr_update_sm_error_state,
182 .clear_sm_error_state = vgpu_gr_clear_sm_error_state,
183 .suspend_contexts = vgpu_gr_suspend_contexts,
184 .resume_contexts = vgpu_gr_resume_contexts,
185 .get_preemption_mode_flags = gr_gp10b_get_preemption_mode_flags,
186 .init_sm_id_table = gr_gv100_init_sm_id_table,
187 .load_smid_config = gr_gv11b_load_smid_config,
188 .program_sm_id_numbering = gr_gv11b_program_sm_id_numbering,
189 .is_ltcs_ltss_addr = gr_gm20b_is_ltcs_ltss_addr,
190 .is_ltcn_ltss_addr = gr_gm20b_is_ltcn_ltss_addr,
191 .split_lts_broadcast_addr = gr_gm20b_split_lts_broadcast_addr,
192 .split_ltc_broadcast_addr = gr_gm20b_split_ltc_broadcast_addr,
193 .setup_rop_mapping = gr_gv11b_setup_rop_mapping,
194 .program_zcull_mapping = gr_gv11b_program_zcull_mapping,
195 .commit_global_timeslice = gr_gv11b_commit_global_timeslice,
196 .commit_inst = vgpu_gr_gv11b_commit_inst,
197 .write_zcull_ptr = gr_gv11b_write_zcull_ptr,
198 .write_pm_ptr = gr_gv11b_write_pm_ptr,
199 .init_elcg_mode = gr_gv11b_init_elcg_mode,
200 .load_tpc_mask = gr_gv11b_load_tpc_mask,
201 .inval_icache = gr_gk20a_inval_icache,
202 .trigger_suspend = gv11b_gr_sm_trigger_suspend,
203 .wait_for_pause = gr_gk20a_wait_for_pause,
204 .resume_from_pause = gv11b_gr_resume_from_pause,
205 .clear_sm_errors = gr_gk20a_clear_sm_errors,
206 .tpc_enabled_exceptions = gr_gk20a_tpc_enabled_exceptions,
207 .get_esr_sm_sel = gv11b_gr_get_esr_sm_sel,
208 .sm_debugger_attached = gv11b_gr_sm_debugger_attached,
209 .suspend_single_sm = gv11b_gr_suspend_single_sm,
210 .suspend_all_sms = gv11b_gr_suspend_all_sms,
211 .resume_single_sm = gv11b_gr_resume_single_sm,
212 .resume_all_sms = gv11b_gr_resume_all_sms,
213 .get_sm_hww_warp_esr = gv11b_gr_get_sm_hww_warp_esr,
214 .get_sm_hww_global_esr = gv11b_gr_get_sm_hww_global_esr,
215 .get_sm_no_lock_down_hww_global_esr_mask =
216 gv11b_gr_get_sm_no_lock_down_hww_global_esr_mask,
217 .lock_down_sm = gv11b_gr_lock_down_sm,
218 .wait_for_sm_lock_down = gv11b_gr_wait_for_sm_lock_down,
219 .clear_sm_hww = gv11b_gr_clear_sm_hww,
220 .init_ovr_sm_dsm_perf = gv11b_gr_init_ovr_sm_dsm_perf,
221 .get_ovr_perf_regs = gv11b_gr_get_ovr_perf_regs,
222 .disable_rd_coalesce = gm20a_gr_disable_rd_coalesce,
223 .set_boosted_ctx = NULL,
224 .set_preemption_mode = vgpu_gr_gp10b_set_preemption_mode,
225 .set_czf_bypass = NULL,
226 .pre_process_sm_exception = gr_gv11b_pre_process_sm_exception,
227 .set_preemption_buffer_va = gr_gv11b_set_preemption_buffer_va,
228 .init_preemption_state = NULL,
229 .update_boosted_ctx = NULL,
230 .set_bes_crop_debug3 = gr_gp10b_set_bes_crop_debug3,
231 .create_gr_sysfs = gr_gv11b_create_sysfs,
232 .set_ctxsw_preemption_mode = vgpu_gr_gp10b_set_ctxsw_preemption_mode,
233 .is_etpc_addr = gv11b_gr_pri_is_etpc_addr,
234 .egpc_etpc_priv_addr_table = gv11b_gr_egpc_etpc_priv_addr_table,
235 .handle_tpc_mpc_exception = gr_gv11b_handle_tpc_mpc_exception,
236 .zbc_s_query_table = gr_gv11b_zbc_s_query_table,
237 .load_zbc_s_default_tbl = gr_gv11b_load_stencil_default_tbl,
238 .handle_gpc_gpcmmu_exception =
239 gr_gv11b_handle_gpc_gpcmmu_exception,
240 .add_zbc_type_s = gr_gv11b_add_zbc_type_s,
241 .get_egpc_base = gv11b_gr_get_egpc_base,
242 .get_egpc_etpc_num = gv11b_gr_get_egpc_etpc_num,
243 .handle_gpc_gpccs_exception =
244 gr_gv11b_handle_gpc_gpccs_exception,
245 .load_zbc_s_tbl = gr_gv11b_load_stencil_tbl,
246 .access_smpc_reg = gv11b_gr_access_smpc_reg,
247 .is_egpc_addr = gv11b_gr_pri_is_egpc_addr,
248 .add_zbc_s = gr_gv11b_add_zbc_stencil,
249 .handle_gcc_exception = gr_gv11b_handle_gcc_exception,
250 .init_sw_veid_bundle = gr_gv11b_init_sw_veid_bundle,
251 .handle_tpc_sm_ecc_exception =
252 gr_gv11b_handle_tpc_sm_ecc_exception,
253 .decode_egpc_addr = gv11b_gr_decode_egpc_addr,
254 .init_ctxsw_hdr_data = gr_gp10b_init_ctxsw_hdr_data,
255 },
256 .fb = {
257 .reset = gv11b_fb_reset,
258 .init_hw = gk20a_fb_init_hw,
259 .init_fs_state = gv11b_fb_init_fs_state,
260 .init_cbc = gv11b_fb_init_cbc,
261 .set_mmu_page_size = gm20b_fb_set_mmu_page_size,
262 .set_use_full_comp_tag_line =
263 gm20b_fb_set_use_full_comp_tag_line,
264 .compression_page_size = gp10b_fb_compression_page_size,
265 .compressible_page_size = gp10b_fb_compressible_page_size,
266 .vpr_info_fetch = gm20b_fb_vpr_info_fetch,
267 .dump_vpr_wpr_info = gm20b_fb_dump_vpr_wpr_info,
268 .read_wpr_info = gm20b_fb_read_wpr_info,
269 .is_debug_mode_enabled = NULL,
270 .set_debug_mode = vgpu_mm_mmu_set_debug_mode,
271 .tlb_invalidate = vgpu_mm_tlb_invalidate,
272 .hub_isr = gv11b_fb_hub_isr,
273 },
274 .clock_gating = {
275 .slcg_bus_load_gating_prod =
276 gv11b_slcg_bus_load_gating_prod,
277 .slcg_ce2_load_gating_prod =
278 gv11b_slcg_ce2_load_gating_prod,
279 .slcg_chiplet_load_gating_prod =
280 gv11b_slcg_chiplet_load_gating_prod,
281 .slcg_ctxsw_firmware_load_gating_prod =
282 gv11b_slcg_ctxsw_firmware_load_gating_prod,
283 .slcg_fb_load_gating_prod =
284 gv11b_slcg_fb_load_gating_prod,
285 .slcg_fifo_load_gating_prod =
286 gv11b_slcg_fifo_load_gating_prod,
287 .slcg_gr_load_gating_prod =
288 gr_gv11b_slcg_gr_load_gating_prod,
289 .slcg_ltc_load_gating_prod =
290 ltc_gv11b_slcg_ltc_load_gating_prod,
291 .slcg_perf_load_gating_prod =
292 gv11b_slcg_perf_load_gating_prod,
293 .slcg_priring_load_gating_prod =
294 gv11b_slcg_priring_load_gating_prod,
295 .slcg_pmu_load_gating_prod =
296 gv11b_slcg_pmu_load_gating_prod,
297 .slcg_therm_load_gating_prod =
298 gv11b_slcg_therm_load_gating_prod,
299 .slcg_xbar_load_gating_prod =
300 gv11b_slcg_xbar_load_gating_prod,
301 .blcg_bus_load_gating_prod =
302 gv11b_blcg_bus_load_gating_prod,
303 .blcg_ce_load_gating_prod =
304 gv11b_blcg_ce_load_gating_prod,
305 .blcg_ctxsw_firmware_load_gating_prod =
306 gv11b_blcg_ctxsw_firmware_load_gating_prod,
307 .blcg_fb_load_gating_prod =
308 gv11b_blcg_fb_load_gating_prod,
309 .blcg_fifo_load_gating_prod =
310 gv11b_blcg_fifo_load_gating_prod,
311 .blcg_gr_load_gating_prod =
312 gv11b_blcg_gr_load_gating_prod,
313 .blcg_ltc_load_gating_prod =
314 gv11b_blcg_ltc_load_gating_prod,
315 .blcg_pwr_csb_load_gating_prod =
316 gv11b_blcg_pwr_csb_load_gating_prod,
317 .blcg_pmu_load_gating_prod =
318 gv11b_blcg_pmu_load_gating_prod,
319 .blcg_xbar_load_gating_prod =
320 gv11b_blcg_xbar_load_gating_prod,
321 .pg_gr_load_gating_prod =
322 gr_gv11b_pg_gr_load_gating_prod,
323 },
324 .fifo = {
325 .init_fifo_setup_hw = vgpu_gv11b_init_fifo_setup_hw,
326 .bind_channel = vgpu_channel_bind,
327 .unbind_channel = vgpu_channel_unbind,
328 .disable_channel = vgpu_channel_disable,
329 .enable_channel = vgpu_channel_enable,
330 .alloc_inst = vgpu_channel_alloc_inst,
331 .free_inst = vgpu_channel_free_inst,
332 .setup_ramfc = vgpu_channel_setup_ramfc,
333 .channel_set_timeslice = vgpu_channel_set_timeslice,
334 .default_timeslice_us = vgpu_fifo_default_timeslice_us,
335 .setup_userd = gk20a_fifo_setup_userd,
336 .userd_gp_get = gv11b_userd_gp_get,
337 .userd_gp_put = gv11b_userd_gp_put,
338 .userd_pb_get = gv11b_userd_pb_get,
339 .pbdma_acquire_val = gk20a_fifo_pbdma_acquire_val,
340 .preempt_channel = vgpu_fifo_preempt_channel,
341 .preempt_tsg = vgpu_fifo_preempt_tsg,
342 .enable_tsg = vgpu_enable_tsg,
343 .disable_tsg = gk20a_disable_tsg,
344 .tsg_verify_channel_status = NULL,
345 .tsg_verify_status_ctx_reload = NULL,
346 /* TODO: implement it for CE fault */
347 .tsg_verify_status_faulted = NULL,
348 .update_runlist = vgpu_fifo_update_runlist,
349 .trigger_mmu_fault = NULL,
350 .get_mmu_fault_info = NULL,
351 .wait_engine_idle = vgpu_fifo_wait_engine_idle,
352 .get_num_fifos = gv11b_fifo_get_num_fifos,
353 .get_pbdma_signature = gp10b_fifo_get_pbdma_signature,
354 .set_runlist_interleave = vgpu_fifo_set_runlist_interleave,
355 .tsg_set_timeslice = vgpu_tsg_set_timeslice,
356 .tsg_open = vgpu_tsg_open,
357 .force_reset_ch = vgpu_fifo_force_reset_ch,
358 .engine_enum_from_type = gp10b_fifo_engine_enum_from_type,
359 .device_info_data_parse = gp10b_device_info_data_parse,
360 .eng_runlist_base_size = fifo_eng_runlist_base__size_1_v,
361 .init_engine_info = vgpu_fifo_init_engine_info,
362 .runlist_entry_size = ram_rl_entry_size_v,
363 .get_tsg_runlist_entry = gv11b_get_tsg_runlist_entry,
364 .get_ch_runlist_entry = gv11b_get_ch_runlist_entry,
365 .is_fault_engine_subid_gpc = gv11b_is_fault_engine_subid_gpc,
366 .dump_pbdma_status = gk20a_dump_pbdma_status,
367 .dump_eng_status = gv11b_dump_eng_status,
368 .dump_channel_status_ramfc = gv11b_dump_channel_status_ramfc,
369 .intr_0_error_mask = gv11b_fifo_intr_0_error_mask,
370 .is_preempt_pending = gv11b_fifo_is_preempt_pending,
371 .init_pbdma_intr_descs = gv11b_fifo_init_pbdma_intr_descs,
372 .reset_enable_hw = gv11b_init_fifo_reset_enable_hw,
373 .teardown_ch_tsg = gv11b_fifo_teardown_ch_tsg,
374 .handle_sched_error = gv11b_fifo_handle_sched_error,
375 .handle_pbdma_intr_0 = gv11b_fifo_handle_pbdma_intr_0,
376 .handle_pbdma_intr_1 = gv11b_fifo_handle_pbdma_intr_1,
377 .init_eng_method_buffers = gv11b_fifo_init_eng_method_buffers,
378 .deinit_eng_method_buffers =
379 gv11b_fifo_deinit_eng_method_buffers,
380 .tsg_bind_channel = vgpu_gv11b_tsg_bind_channel,
381 .tsg_unbind_channel = vgpu_tsg_unbind_channel,
382#ifdef CONFIG_TEGRA_GK20A_NVHOST
383 .alloc_syncpt_buf = vgpu_gv11b_fifo_alloc_syncpt_buf,
384 .free_syncpt_buf = gv11b_fifo_free_syncpt_buf,
385 .add_syncpt_wait_cmd = gv11b_fifo_add_syncpt_wait_cmd,
386 .get_syncpt_wait_cmd_size = gv11b_fifo_get_syncpt_wait_cmd_size,
387 .add_syncpt_incr_cmd = gv11b_fifo_add_syncpt_incr_cmd,
388 .get_syncpt_incr_cmd_size = gv11b_fifo_get_syncpt_incr_cmd_size,
389#endif
390 .resetup_ramfc = NULL,
391 .reschedule_runlist = NULL,
392 .device_info_fault_id = top_device_info_data_fault_id_enum_v,
393 .free_channel_ctx_header = vgpu_gv11b_free_subctx_header,
394 .preempt_ch_tsg = gv11b_fifo_preempt_ch_tsg,
395 .handle_ctxsw_timeout = gv11b_fifo_handle_ctxsw_timeout,
396 },
397 .gr_ctx = {
398 .get_netlist_name = gr_gv11b_get_netlist_name,
399 .is_fw_defined = gr_gv11b_is_firmware_defined,
400 },
401#ifdef CONFIG_GK20A_CTXSW_TRACE
402 .fecs_trace = {
403 .alloc_user_buffer = NULL,
404 .free_user_buffer = NULL,
405 .mmap_user_buffer = NULL,
406 .init = NULL,
407 .deinit = NULL,
408 .enable = NULL,
409 .disable = NULL,
410 .is_enabled = NULL,
411 .reset = NULL,
412 .flush = NULL,
413 .poll = NULL,
414 .bind_channel = NULL,
415 .unbind_channel = NULL,
416 .max_entries = NULL,
417 },
418#endif /* CONFIG_GK20A_CTXSW_TRACE */
419 .mm = {
420 /* FIXME: add support for sparse mappings */
421 .support_sparse = NULL,
422 .gmmu_map = vgpu_gp10b_locked_gmmu_map,
423 .gmmu_unmap = vgpu_locked_gmmu_unmap,
424 .vm_bind_channel = vgpu_vm_bind_channel,
425 .fb_flush = vgpu_mm_fb_flush,
426 .l2_invalidate = vgpu_mm_l2_invalidate,
427 .l2_flush = vgpu_mm_l2_flush,
428 .cbc_clean = gk20a_mm_cbc_clean,
429 .set_big_page_size = gm20b_mm_set_big_page_size,
430 .get_big_page_sizes = gm20b_mm_get_big_page_sizes,
431 .get_default_big_page_size = gp10b_mm_get_default_big_page_size,
432 .gpu_phys_addr = gm20b_gpu_phys_addr,
433 .get_iommu_bit = gk20a_mm_get_iommu_bit,
434 .get_mmu_levels = gp10b_mm_get_mmu_levels,
435 .init_pdb = gp10b_mm_init_pdb,
436 .init_mm_setup_hw = vgpu_gp10b_init_mm_setup_hw,
437 .is_bar1_supported = gv11b_mm_is_bar1_supported,
438 .init_inst_block = gv11b_init_inst_block,
439 .mmu_fault_pending = gv11b_mm_mmu_fault_pending,
440 .get_kind_invalid = gm20b_get_kind_invalid,
441 .get_kind_pitch = gm20b_get_kind_pitch,
442 .init_bar2_vm = gb10b_init_bar2_vm,
443 .init_bar2_mm_hw_setup = gv11b_init_bar2_mm_hw_setup,
444 .remove_bar2_vm = gv11b_mm_remove_bar2_vm,
445 .fault_info_mem_destroy = gv11b_mm_fault_info_mem_destroy,
446 },
447 .therm = {
448 .init_therm_setup_hw = gp10b_init_therm_setup_hw,
449 .elcg_init_idle_filters = gv11b_elcg_init_idle_filters,
450 },
451 .pmu = {
452 .pmu_setup_elpg = gp10b_pmu_setup_elpg,
453 .pmu_get_queue_head = pwr_pmu_queue_head_r,
454 .pmu_get_queue_head_size = pwr_pmu_queue_head__size_1_v,
455 .pmu_get_queue_tail = pwr_pmu_queue_tail_r,
456 .pmu_get_queue_tail_size = pwr_pmu_queue_tail__size_1_v,
457 .pmu_queue_head = gk20a_pmu_queue_head,
458 .pmu_queue_tail = gk20a_pmu_queue_tail,
459 .pmu_msgq_tail = gk20a_pmu_msgq_tail,
460 .pmu_mutex_size = pwr_pmu_mutex__size_1_v,
461 .pmu_mutex_acquire = gk20a_pmu_mutex_acquire,
462 .pmu_mutex_release = gk20a_pmu_mutex_release,
463 .write_dmatrfbase = gp10b_write_dmatrfbase,
464 .pmu_elpg_statistics = gp106_pmu_elpg_statistics,
465 .pmu_pg_init_param = gv11b_pg_gr_init,
466 .pmu_pg_supported_engines_list = gk20a_pmu_pg_engines_list,
467 .pmu_pg_engines_feature_list = gk20a_pmu_pg_feature_list,
468 .dump_secure_fuses = pmu_dump_security_fuses_gp10b,
469 .reset_engine = gp106_pmu_engine_reset,
470 .is_engine_in_reset = gp106_pmu_is_engine_in_reset,
471 .pmu_nsbootstrap = gv11b_pmu_bootstrap,
472 .pmu_pg_set_sub_feature_mask = gv11b_pg_set_subfeature_mask,
473 .is_pmu_supported = gv11b_is_pmu_supported,
474 },
475 .regops = {
476 .get_global_whitelist_ranges =
477 gv11b_get_global_whitelist_ranges,
478 .get_global_whitelist_ranges_count =
479 gv11b_get_global_whitelist_ranges_count,
480 .get_context_whitelist_ranges =
481 gv11b_get_context_whitelist_ranges,
482 .get_context_whitelist_ranges_count =
483 gv11b_get_context_whitelist_ranges_count,
484 .get_runcontrol_whitelist = gv11b_get_runcontrol_whitelist,
485 .get_runcontrol_whitelist_count =
486 gv11b_get_runcontrol_whitelist_count,
487 .get_runcontrol_whitelist_ranges =
488 gv11b_get_runcontrol_whitelist_ranges,
489 .get_runcontrol_whitelist_ranges_count =
490 gv11b_get_runcontrol_whitelist_ranges_count,
491 .get_qctl_whitelist = gv11b_get_qctl_whitelist,
492 .get_qctl_whitelist_count = gv11b_get_qctl_whitelist_count,
493 .get_qctl_whitelist_ranges = gv11b_get_qctl_whitelist_ranges,
494 .get_qctl_whitelist_ranges_count =
495 gv11b_get_qctl_whitelist_ranges_count,
496 .apply_smpc_war = gv11b_apply_smpc_war,
497 },
498 .mc = {
499 .intr_enable = mc_gv11b_intr_enable,
500 .intr_unit_config = mc_gp10b_intr_unit_config,
501 .isr_stall = mc_gp10b_isr_stall,
502 .intr_stall = mc_gp10b_intr_stall,
503 .intr_stall_pause = mc_gp10b_intr_stall_pause,
504 .intr_stall_resume = mc_gp10b_intr_stall_resume,
505 .intr_nonstall = mc_gp10b_intr_nonstall,
506 .intr_nonstall_pause = mc_gp10b_intr_nonstall_pause,
507 .intr_nonstall_resume = mc_gp10b_intr_nonstall_resume,
508 .enable = gk20a_mc_enable,
509 .disable = gk20a_mc_disable,
510 .reset = gk20a_mc_reset,
511 .boot_0 = gk20a_mc_boot_0,
512 .is_intr1_pending = mc_gp10b_is_intr1_pending,
513 .is_intr_hub_pending = gv11b_mc_is_intr_hub_pending,
514 },
515 .debug = {
516 .show_dump = NULL,
517 },
518 .dbg_session_ops = {
519 .exec_reg_ops = vgpu_exec_regops,
520 .dbg_set_powergate = vgpu_dbg_set_powergate,
521 .check_and_set_global_reservation =
522 vgpu_check_and_set_global_reservation,
523 .check_and_set_context_reservation =
524 vgpu_check_and_set_context_reservation,
525 .release_profiler_reservation =
526 vgpu_release_profiler_reservation,
527 .perfbuffer_enable = vgpu_perfbuffer_enable,
528 .perfbuffer_disable = vgpu_perfbuffer_disable,
529 },
530 .bus = {
531 .init_hw = gk20a_bus_init_hw,
532 .isr = gk20a_bus_isr,
533 .read_ptimer = vgpu_read_ptimer,
534 .get_timestamps_zipper = vgpu_get_timestamps_zipper,
535 .bar1_bind = NULL,
536 },
537#if defined(CONFIG_GK20A_CYCLE_STATS)
538 .css = {
539 .enable_snapshot = vgpu_css_enable_snapshot_buffer,
540 .disable_snapshot = vgpu_css_release_snapshot_buffer,
541 .check_data_available = vgpu_css_flush_snapshots,
542 .set_handled_snapshots = NULL,
543 .allocate_perfmon_ids = NULL,
544 .release_perfmon_ids = NULL,
545 },
546#endif
547 .falcon = {
548 .falcon_hal_sw_init = gk20a_falcon_hal_sw_init,
549 },
550 .priv_ring = {
551 .isr = gp10b_priv_ring_isr,
552 },
553 .chip_init_gpu_characteristics = vgpu_gv11b_init_gpu_characteristics,
554 .get_litter_value = gv11b_get_litter_value,
555};
556
557int vgpu_gv11b_init_hal(struct gk20a *g)
558{
559 struct gpu_ops *gops = &g->ops;
560 u32 val;
561 bool priv_security;
562
563 gops->ltc = vgpu_gv11b_ops.ltc;
564 gops->ce2 = vgpu_gv11b_ops.ce2;
565 gops->gr = vgpu_gv11b_ops.gr;
566 gops->fb = vgpu_gv11b_ops.fb;
567 gops->clock_gating = vgpu_gv11b_ops.clock_gating;
568 gops->fifo = vgpu_gv11b_ops.fifo;
569 gops->gr_ctx = vgpu_gv11b_ops.gr_ctx;
570 gops->mm = vgpu_gv11b_ops.mm;
571 gops->fecs_trace = vgpu_gv11b_ops.fecs_trace;
572 gops->therm = vgpu_gv11b_ops.therm;
573 gops->pmu = vgpu_gv11b_ops.pmu;
574 gops->regops = vgpu_gv11b_ops.regops;
575 gops->mc = vgpu_gv11b_ops.mc;
576 gops->debug = vgpu_gv11b_ops.debug;
577 gops->dbg_session_ops = vgpu_gv11b_ops.dbg_session_ops;
578 gops->bus = vgpu_gv11b_ops.bus;
579#if defined(CONFIG_GK20A_CYCLE_STATS)
580 gops->css = vgpu_gv11b_ops.css;
581#endif
582 gops->falcon = vgpu_gv11b_ops.falcon;
583 gops->priv_ring = vgpu_gv11b_ops.priv_ring;
584
585 /* Lone functions */
586 gops->chip_init_gpu_characteristics =
587 vgpu_gv11b_ops.chip_init_gpu_characteristics;
588 gops->get_litter_value = vgpu_gv11b_ops.get_litter_value;
589
590 val = gk20a_readl(g, fuse_opt_priv_sec_en_r());
591 if (val) {
592 priv_security = true;
593 pr_err("priv security is enabled\n");
594 } else {
595 priv_security = false;
596 pr_err("priv security is disabled\n");
597 }
598 __nvgpu_set_enabled(g, NVGPU_GR_USE_DMA_FOR_FW_BOOTSTRAP, false);
599 __nvgpu_set_enabled(g, NVGPU_SEC_PRIVSECURITY, priv_security);
600 __nvgpu_set_enabled(g, NVGPU_SEC_SECUREGPCCS, priv_security);
601
602 /* priv security dependent ops */
603 if (nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY)) {
604 /* Add in ops from gm20b acr */
 605		gops->pmu.prepare_ucode = gp106_prepare_ucode_blob;
 606		gops->pmu.pmu_setup_hw_and_bootstrap = gv11b_bootstrap_hs_flcn;
 607		gops->pmu.get_wpr = gm20b_wpr_info;
 608		gops->pmu.alloc_blob_space = gm20b_alloc_blob_space;
 609		gops->pmu.pmu_populate_loader_cfg =
 610			gp106_pmu_populate_loader_cfg;
 611		gops->pmu.flcn_populate_bl_dmem_desc =
 612			gp106_flcn_populate_bl_dmem_desc;
 613		gops->pmu.falcon_wait_for_halt = pmu_wait_for_halt;
 614		gops->pmu.falcon_clear_halt_interrupt_status =
 615			clear_halt_interrupt_status;
 616		gops->pmu.init_falcon_setup_hw = gv11b_init_pmu_setup_hw1;
617
618 gops->pmu.init_wpr_region = gm20b_pmu_init_acr;
619 gops->pmu.load_lsfalcon_ucode = gp10b_load_falcon_ucode;
 620		gops->pmu.is_lazy_bootstrap = gv11b_is_lazy_bootstrap;
 621		gops->pmu.is_priv_load = gv11b_is_priv_load;
622
623 gops->gr.load_ctxsw_ucode = gr_gm20b_load_ctxsw_ucode;
624 } else {
625 /* Inherit from gk20a */
 626		gops->pmu.prepare_ucode = nvgpu_pmu_prepare_ns_ucode_blob;
 627		gops->pmu.pmu_setup_hw_and_bootstrap = gk20a_init_pmu_setup_hw1;
628
629 gops->pmu.load_lsfalcon_ucode = NULL;
630 gops->pmu.init_wpr_region = NULL;
631 gops->pmu.pmu_setup_hw_and_bootstrap = gp10b_init_pmu_setup_hw1;
632
633 gops->gr.load_ctxsw_ucode = gr_gk20a_load_ctxsw_ucode;
634 }
635
636 __nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, false);
637 g->bootstrap_owner = LSF_BOOTSTRAP_OWNER_DEFAULT;
638
639 g->name = "gv11b";
640
641 return 0;
642}
diff --git a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_subctx_gv11b.c b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_subctx_gv11b.c
deleted file mode 100644
index 857e58c4..00000000
--- a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_subctx_gv11b.c
+++ /dev/null
@@ -1,79 +0,0 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#include <gk20a/gk20a.h>
24#include <vgpu/vgpu.h>
25#include <linux/tegra_vgpu.h>
26
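/*
 * The guest only reserves GPU VA for the gv11b subcontext (ctx) header; the
 * backing allocation and mapping are done by the vgpu server through
 * TEGRA_VGPU_CMD_ALLOC_CTX_HEADER. The VA reservation is sized from the
 * golden context image.
 */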
27int vgpu_gv11b_alloc_subctx_header(struct channel_gk20a *c)
28{
29 struct ctx_header_desc *ctx = &c->ch_ctx.ctx_header;
30 struct tegra_vgpu_cmd_msg msg = {};
31 struct tegra_vgpu_alloc_ctx_header_params *p =
32 &msg.params.t19x.alloc_ctx_header;
33 struct gr_gk20a *gr = &c->g->gr;
34 int err;
35
36 msg.cmd = TEGRA_VGPU_CMD_ALLOC_CTX_HEADER;
37 msg.handle = vgpu_get_handle(c->g);
38 p->ch_handle = c->virt_ctx;
39 p->ctx_header_va = __nvgpu_vm_alloc_va(c->vm,
40 gr->ctx_vars.golden_image_size,
41 gmmu_page_size_kernel);
42 if (!p->ctx_header_va) {
43 nvgpu_err(c->g, "alloc va failed for ctx_header");
44 return -ENOMEM;
45 }
46 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
47 err = err ? err : msg.ret;
48 if (unlikely(err)) {
49 nvgpu_err(c->g, "alloc ctx_header failed err %d", err);
50 __nvgpu_vm_free_va(c->vm, p->ctx_header_va,
51 gmmu_page_size_kernel);
52 return err;
53 }
54 ctx->mem.gpu_va = p->ctx_header_va;
55
56 return err;
57}
58
59void vgpu_gv11b_free_subctx_header(struct channel_gk20a *c)
60{
61 struct ctx_header_desc *ctx = &c->ch_ctx.ctx_header;
62 struct tegra_vgpu_cmd_msg msg = {};
63 struct tegra_vgpu_free_ctx_header_params *p =
64 &msg.params.t19x.free_ctx_header;
65 int err;
66
67 if (ctx->mem.gpu_va) {
68 msg.cmd = TEGRA_VGPU_CMD_FREE_CTX_HEADER;
69 msg.handle = vgpu_get_handle(c->g);
70 p->ch_handle = c->virt_ctx;
71 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
72 err = err ? err : msg.ret;
73 if (unlikely(err))
74 nvgpu_err(c->g, "free ctx_header failed err %d", err);
75 __nvgpu_vm_free_va(c->vm, ctx->mem.gpu_va,
76 gmmu_page_size_kernel);
77 ctx->mem.gpu_va = 0;
78 }
79}
diff --git a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_subctx_gv11b.h b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_subctx_gv11b.h
deleted file mode 100644
index 0e09f4f6..00000000
--- a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_subctx_gv11b.h
+++ /dev/null
@@ -1,31 +0,0 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef _VGPU_SUBCTX_GV11B_H_
24#define _VGPU_SUBCTX_GV11B_H_
25
26struct channel_gk20a;
27
28int vgpu_gv11b_alloc_subctx_header(struct channel_gk20a *c);
29void vgpu_gv11b_free_subctx_header(struct channel_gk20a *c);
30
31#endif
diff --git a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_tsg_gv11b.c b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_tsg_gv11b.c
deleted file mode 100644
index 7e70272a..00000000
--- a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_tsg_gv11b.c
+++ /dev/null
@@ -1,59 +0,0 @@
1/*
2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#include <linux/tegra_vgpu.h>
24#include <gk20a/gk20a.h>
25#include <vgpu/vgpu.h>
26
27#include "vgpu_tsg_gv11b.h"
28
29int vgpu_gv11b_tsg_bind_channel(struct tsg_gk20a *tsg,
30 struct channel_gk20a *ch)
31{
32 struct tegra_vgpu_cmd_msg msg = {};
33 struct tegra_vgpu_tsg_bind_channel_ex_params *p =
34 &msg.params.t19x.tsg_bind_channel_ex;
35 int err;
36
37 gk20a_dbg_fn("");
38
39 err = gk20a_tsg_bind_channel(tsg, ch);
40 if (err)
41 return err;
42
43 msg.cmd = TEGRA_VGPU_CMD_TSG_BIND_CHANNEL_EX;
44 msg.handle = vgpu_get_handle(tsg->g);
45 p->tsg_id = tsg->tsgid;
46 p->ch_handle = ch->virt_ctx;
47 p->subctx_id = ch->t19x.subctx_id;
48 p->runqueue_sel = ch->t19x.runqueue_sel;
49 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
50 err = err ? err : msg.ret;
51 if (err) {
52 nvgpu_err(tsg->g,
53 "vgpu_gv11b_tsg_bind_channel failed, ch %d tsgid %d",
54 ch->chid, tsg->tsgid);
55 gk20a_tsg_unbind_channel(ch);
56 }
57
58 return err;
59}
diff --git a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_tsg_gv11b.h b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_tsg_gv11b.h
deleted file mode 100644
index c7bb2f4e..00000000
--- a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_tsg_gv11b.h
+++ /dev/null
@@ -1,29 +0,0 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef _VGPU_TSG_GV11B_H_
24#define _VGPU_TSG_GV11B_H_
25
26int vgpu_gv11b_tsg_bind_channel(struct tsg_gk20a *tsg,
27 struct channel_gk20a *ch);
28
29#endif
diff --git a/drivers/gpu/nvgpu/vgpu/ltc_vgpu.c b/drivers/gpu/nvgpu/vgpu/ltc_vgpu.c
deleted file mode 100644
index fb9558e2..00000000
--- a/drivers/gpu/nvgpu/vgpu/ltc_vgpu.c
+++ /dev/null
@@ -1,67 +0,0 @@
1/*
2 * Virtualized GPU L2
3 *
4 * Copyright (c) 2014-2017 NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include "vgpu/vgpu.h"
26#include "vgpu/ltc_vgpu.h"
27
28int vgpu_determine_L2_size_bytes(struct gk20a *g)
29{
30 struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
31
32 gk20a_dbg_fn("");
33
34 return priv->constants.l2_size;
35}
36
37int vgpu_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr)
38{
39 struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
40 u32 max_comptag_lines = 0;
41 int err;
42
43 gk20a_dbg_fn("");
44
45 gr->cacheline_size = priv->constants.cacheline_size;
46 gr->comptags_per_cacheline = priv->constants.comptags_per_cacheline;
47 gr->slices_per_ltc = priv->constants.slices_per_ltc;
48 max_comptag_lines = priv->constants.comptag_lines;
49
50 if (max_comptag_lines < 2)
51 return -ENXIO;
52
53 err = gk20a_comptag_allocator_init(g, &gr->comp_tags, max_comptag_lines);
54 if (err)
55 return err;
56
57 return 0;
58}
59
60void vgpu_ltc_init_fs_state(struct gk20a *g)
61{
62 struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
63
64 gk20a_dbg_fn("");
65
66 g->ltc_count = priv->constants.ltc_count;
67}
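These three callbacks are consumed by the per-chip vgpu HAL files (vgpu_hal_gm20b.c and friends in the diffstat), which plug them into the LTC ops table. A rough sketch of that wiring follows; the gpu_ops field names are assumed for illustration and may not match this tree exactly:

/* Illustrative only: exact gpu_ops.ltc field names may differ in this tree. */
static void vgpu_example_init_ltc_ops(struct gpu_ops *gops)
{
	gops->ltc.determine_max_size = vgpu_determine_L2_size_bytes;
	gops->ltc.init_comptags = vgpu_ltc_init_comptags;
	gops->ltc.init_fs_state = vgpu_ltc_init_fs_state;
}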
diff --git a/drivers/gpu/nvgpu/vgpu/ltc_vgpu.h b/drivers/gpu/nvgpu/vgpu/ltc_vgpu.h
deleted file mode 100644
index 3437b4cb..00000000
--- a/drivers/gpu/nvgpu/vgpu/ltc_vgpu.h
+++ /dev/null
@@ -1,33 +0,0 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef _LTC_VGPU_H_
24#define _LTC_VGPU_H_
25
26struct gk20a;
27struct gr_gk20a;
28
29int vgpu_determine_L2_size_bytes(struct gk20a *g);
30int vgpu_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr);
31void vgpu_ltc_init_fs_state(struct gk20a *g);
32
33#endif
diff --git a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
deleted file mode 100644
index 79d15d10..00000000
--- a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
+++ /dev/null
@@ -1,369 +0,0 @@
1/*
2 * Virtualized GPU Memory Management
3 *
4 * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include <linux/dma-mapping.h>
26#include <uapi/linux/nvgpu.h>
27
28#include <nvgpu/kmem.h>
29#include <nvgpu/dma.h>
30#include <nvgpu/bug.h>
31#include <nvgpu/vm.h>
32#include <nvgpu/vm_area.h>
33
34#include <nvgpu/vgpu/vm.h>
35
36#include <nvgpu/linux/vm.h>
37#include <nvgpu/linux/nvgpu_mem.h>
38
39#include "vgpu/vgpu.h"
40#include "vgpu/mm_vgpu.h"
41#include "gk20a/mm_gk20a.h"
42#include "gm20b/mm_gm20b.h"
43
44static int vgpu_init_mm_setup_sw(struct gk20a *g)
45{
46 struct mm_gk20a *mm = &g->mm;
47
48 gk20a_dbg_fn("");
49
50 if (mm->sw_ready) {
51 gk20a_dbg_fn("skip init");
52 return 0;
53 }
54
55 nvgpu_mutex_init(&mm->tlb_lock);
56 nvgpu_mutex_init(&mm->priv_lock);
57
58 mm->g = g;
59
60	/* TBD: make channel vm size configurable */
61 mm->channel.user_size = NV_MM_DEFAULT_USER_SIZE;
62 mm->channel.kernel_size = NV_MM_DEFAULT_KERNEL_SIZE;
63
64 gk20a_dbg_info("channel vm size: user %dMB kernel %dMB",
65 (int)(mm->channel.user_size >> 20),
66 (int)(mm->channel.kernel_size >> 20));
67
68 mm->sw_ready = true;
69
70 return 0;
71}
72
73int vgpu_init_mm_support(struct gk20a *g)
74{
75 int err;
76
77 gk20a_dbg_fn("");
78
79 err = vgpu_init_mm_setup_sw(g);
80 if (err)
81 return err;
82
83 if (g->ops.mm.init_mm_setup_hw)
84 err = g->ops.mm.init_mm_setup_hw(g);
85
86 return err;
87}
88
89u64 vgpu_locked_gmmu_map(struct vm_gk20a *vm,
90 u64 map_offset,
91 struct nvgpu_sgt *sgt,
92 u64 buffer_offset,
93 u64 size,
94 int pgsz_idx,
95 u8 kind_v,
96 u32 ctag_offset,
97 u32 flags,
98 int rw_flag,
99 bool clear_ctags,
100 bool sparse,
101 bool priv,
102 struct vm_gk20a_mapping_batch *batch,
103 enum nvgpu_aperture aperture)
104{
105 int err = 0;
106 struct device *d = dev_from_vm(vm);
107 struct gk20a *g = gk20a_from_vm(vm);
108 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(d);
109 struct tegra_vgpu_cmd_msg msg;
110 struct tegra_vgpu_as_map_params *p = &msg.params.as_map;
111 u64 addr = nvgpu_sgt_get_gpu_addr(g, sgt, sgt->sgl, NULL);
112 u8 prot;
113
114 gk20a_dbg_fn("");
115
116 /* Allocate (or validate when map_offset != 0) the virtual address. */
117 if (!map_offset) {
118 map_offset = __nvgpu_vm_alloc_va(vm, size,
119 pgsz_idx);
120 if (!map_offset) {
121 nvgpu_err(g, "failed to allocate va space");
122 err = -ENOMEM;
123 goto fail;
124 }
125 }
126
127 if (rw_flag == gk20a_mem_flag_read_only)
128 prot = TEGRA_VGPU_MAP_PROT_READ_ONLY;
129 else if (rw_flag == gk20a_mem_flag_write_only)
130 prot = TEGRA_VGPU_MAP_PROT_WRITE_ONLY;
131 else
132 prot = TEGRA_VGPU_MAP_PROT_NONE;
133
134 msg.cmd = TEGRA_VGPU_CMD_AS_MAP;
135 msg.handle = vgpu_get_handle(g);
136 p->handle = vm->handle;
137 p->addr = addr;
138 p->gpu_va = map_offset;
139 p->size = size;
140 if (pgsz_idx == gmmu_page_size_kernel) {
141 u32 page_size = vm->gmmu_page_sizes[pgsz_idx];
142
143 if (page_size == vm->gmmu_page_sizes[gmmu_page_size_small]) {
144 pgsz_idx = gmmu_page_size_small;
145 } else if (page_size ==
146 vm->gmmu_page_sizes[gmmu_page_size_big]) {
147 pgsz_idx = gmmu_page_size_big;
148 } else {
149 nvgpu_err(g, "invalid kernel page size %d",
150 page_size);
151 goto fail;
152 }
153 }
154 p->pgsz_idx = pgsz_idx;
155 p->iova = mapping ? 1 : 0;
156 p->kind = kind_v;
157 p->cacheable = (flags & NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE) ? 1 : 0;
158 p->prot = prot;
159 p->ctag_offset = ctag_offset;
160 p->clear_ctags = clear_ctags;
161 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
162 err = err ? err : msg.ret;
163 if (err)
164 goto fail;
165
166 /* TLB invalidate handled on server side */
167
168 return map_offset;
169fail:
170 nvgpu_err(g, "%s: failed with err=%d", __func__, err);
171 return 0;
172}
173
174void vgpu_locked_gmmu_unmap(struct vm_gk20a *vm,
175 u64 vaddr,
176 u64 size,
177 int pgsz_idx,
178 bool va_allocated,
179 int rw_flag,
180 bool sparse,
181 struct vm_gk20a_mapping_batch *batch)
182{
183 struct gk20a *g = gk20a_from_vm(vm);
184 struct tegra_vgpu_cmd_msg msg;
185 struct tegra_vgpu_as_map_params *p = &msg.params.as_map;
186 int err;
187
188 gk20a_dbg_fn("");
189
190 if (va_allocated) {
191 err = __nvgpu_vm_free_va(vm, vaddr, pgsz_idx);
192 if (err) {
193 dev_err(dev_from_vm(vm),
194 "failed to free va");
195 return;
196 }
197 }
198
199 msg.cmd = TEGRA_VGPU_CMD_AS_UNMAP;
200 msg.handle = vgpu_get_handle(g);
201 p->handle = vm->handle;
202 p->gpu_va = vaddr;
203 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
204 if (err || msg.ret)
205 dev_err(dev_from_vm(vm),
206 "failed to update gmmu ptes on unmap");
207
208 /* TLB invalidate handled on server side */
209}
210
211/*
212 * This is called by the common VM init routine to handle vGPU specifics of
213 * initializing a VM on a vGPU. This alone is not enough to init a VM. See
214 * nvgpu_vm_init().
215 */
216int vgpu_vm_init(struct gk20a *g, struct vm_gk20a *vm)
217{
218 struct tegra_vgpu_cmd_msg msg;
219 struct tegra_vgpu_as_share_params *p = &msg.params.as_share;
220 int err;
221
222 msg.cmd = TEGRA_VGPU_CMD_AS_ALLOC_SHARE;
223 msg.handle = vgpu_get_handle(g);
224 p->size = vm->va_limit;
225 p->big_page_size = vm->big_page_size;
226
227 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
228 if (err || msg.ret)
229 return -ENOMEM;
230
231 vm->handle = p->handle;
232
233 return 0;
234}
235
236/*
237 * Similar to vgpu_vm_init() this is called as part of the cleanup path for
238 * VMs. This alone is not enough to remove a VM - see nvgpu_vm_remove().
239 */
240void vgpu_vm_remove(struct vm_gk20a *vm)
241{
242 struct gk20a *g = gk20a_from_vm(vm);
243 struct tegra_vgpu_cmd_msg msg;
244 struct tegra_vgpu_as_share_params *p = &msg.params.as_share;
245 int err;
246
247 msg.cmd = TEGRA_VGPU_CMD_AS_FREE_SHARE;
248 msg.handle = vgpu_get_handle(g);
249 p->handle = vm->handle;
250 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
251 WARN_ON(err || msg.ret);
252}
253
254u64 vgpu_bar1_map(struct gk20a *g, struct sg_table **sgt, u64 size)
255{
256 struct dma_iommu_mapping *mapping =
257 to_dma_iommu_mapping(dev_from_gk20a(g));
258 u64 addr = nvgpu_mem_get_addr_sgl(g, (*sgt)->sgl);
259 struct tegra_vgpu_cmd_msg msg;
260 struct tegra_vgpu_as_map_params *p = &msg.params.as_map;
261 int err;
262
263 msg.cmd = TEGRA_VGPU_CMD_MAP_BAR1;
264 msg.handle = vgpu_get_handle(g);
265 p->addr = addr;
266 p->size = size;
267 p->iova = mapping ? 1 : 0;
268 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
269 if (err || msg.ret)
270 addr = 0;
271 else
272 addr = p->gpu_va;
273
274 return addr;
275}
276
277int vgpu_vm_bind_channel(struct gk20a_as_share *as_share,
278 struct channel_gk20a *ch)
279{
280 struct vm_gk20a *vm = as_share->vm;
281 struct tegra_vgpu_cmd_msg msg;
282 struct tegra_vgpu_as_bind_share_params *p = &msg.params.as_bind_share;
283 int err;
284
285 gk20a_dbg_fn("");
286
287 ch->vm = vm;
288 msg.cmd = TEGRA_VGPU_CMD_AS_BIND_SHARE;
289 msg.handle = vgpu_get_handle(ch->g);
290 p->as_handle = vm->handle;
291 p->chan_handle = ch->virt_ctx;
292 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
293
294 if (err || msg.ret) {
295 ch->vm = NULL;
296 err = -ENOMEM;
297 }
298
299 if (ch->vm)
300 nvgpu_vm_get(ch->vm);
301
302 return err;
303}
304
305static void vgpu_cache_maint(u64 handle, u8 op)
306{
307 struct tegra_vgpu_cmd_msg msg;
308 struct tegra_vgpu_cache_maint_params *p = &msg.params.cache_maint;
309 int err;
310
311 msg.cmd = TEGRA_VGPU_CMD_CACHE_MAINT;
312 msg.handle = handle;
313 p->op = op;
314 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
315 WARN_ON(err || msg.ret);
316}
317
318int vgpu_mm_fb_flush(struct gk20a *g)
319{
320
321 gk20a_dbg_fn("");
322
323 vgpu_cache_maint(vgpu_get_handle(g), TEGRA_VGPU_FB_FLUSH);
324 return 0;
325}
326
327void vgpu_mm_l2_invalidate(struct gk20a *g)
328{
329
330 gk20a_dbg_fn("");
331
332 vgpu_cache_maint(vgpu_get_handle(g), TEGRA_VGPU_L2_MAINT_INV);
333}
334
335void vgpu_mm_l2_flush(struct gk20a *g, bool invalidate)
336{
337 u8 op;
338
339 gk20a_dbg_fn("");
340
341 if (invalidate)
342 op = TEGRA_VGPU_L2_MAINT_FLUSH_INV;
343 else
344 op = TEGRA_VGPU_L2_MAINT_FLUSH;
345
346 vgpu_cache_maint(vgpu_get_handle(g), op);
347}
348
349void vgpu_mm_tlb_invalidate(struct gk20a *g, struct nvgpu_mem *pdb)
350{
351 gk20a_dbg_fn("");
352
353 nvgpu_err(g, "call to RM server not supported");
354}
355
356void vgpu_mm_mmu_set_debug_mode(struct gk20a *g, bool enable)
357{
358 struct tegra_vgpu_cmd_msg msg;
359 struct tegra_vgpu_mmu_debug_mode *p = &msg.params.mmu_debug_mode;
360 int err;
361
362 gk20a_dbg_fn("");
363
364 msg.cmd = TEGRA_VGPU_CMD_SET_MMU_DEBUG_MODE;
365 msg.handle = vgpu_get_handle(g);
366 p->enable = (u32)enable;
367 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
368 WARN_ON(err || msg.ret);
369}
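vgpu_locked_gmmu_map() translates the nvgpu rw_flag into a TEGRA_VGPU_MAP_PROT_* value inline; pulled out as a hypothetical helper, that translation looks like this (an illustrative refactor only, mirroring the code above):

/* Hypothetical helper mirroring the rw_flag handling in vgpu_locked_gmmu_map(). */
static u8 vgpu_map_prot_from_rw_flag(int rw_flag)
{
	if (rw_flag == gk20a_mem_flag_read_only)
		return TEGRA_VGPU_MAP_PROT_READ_ONLY;
	if (rw_flag == gk20a_mem_flag_write_only)
		return TEGRA_VGPU_MAP_PROT_WRITE_ONLY;
	return TEGRA_VGPU_MAP_PROT_NONE;
}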
diff --git a/drivers/gpu/nvgpu/vgpu/mm_vgpu.h b/drivers/gpu/nvgpu/vgpu/mm_vgpu.h
deleted file mode 100644
index ed66282c..00000000
--- a/drivers/gpu/nvgpu/vgpu/mm_vgpu.h
+++ /dev/null
@@ -1,56 +0,0 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef _MM_VGPU_H_
24#define _MM_VGPU_H_
25
26u64 vgpu_locked_gmmu_map(struct vm_gk20a *vm,
27 u64 map_offset,
28 struct nvgpu_sgt *sgt,
29 u64 buffer_offset,
30 u64 size,
31 int pgsz_idx,
32 u8 kind_v,
33 u32 ctag_offset,
34 u32 flags,
35 int rw_flag,
36 bool clear_ctags,
37 bool sparse,
38 bool priv,
39 struct vm_gk20a_mapping_batch *batch,
40 enum nvgpu_aperture aperture);
41void vgpu_locked_gmmu_unmap(struct vm_gk20a *vm,
42 u64 vaddr,
43 u64 size,
44 int pgsz_idx,
45 bool va_allocated,
46 int rw_flag,
47 bool sparse,
48 struct vm_gk20a_mapping_batch *batch);
49int vgpu_vm_bind_channel(struct gk20a_as_share *as_share,
50 struct channel_gk20a *ch);
51int vgpu_mm_fb_flush(struct gk20a *g);
52void vgpu_mm_l2_invalidate(struct gk20a *g);
53void vgpu_mm_l2_flush(struct gk20a *g, bool invalidate);
54void vgpu_mm_tlb_invalidate(struct gk20a *g, struct nvgpu_mem *pdb);
55void vgpu_mm_mmu_set_debug_mode(struct gk20a *g, bool enable);
56#endif
diff --git a/drivers/gpu/nvgpu/vgpu/sysfs_vgpu.c b/drivers/gpu/nvgpu/vgpu/sysfs_vgpu.c
deleted file mode 100644
index 30b503db..00000000
--- a/drivers/gpu/nvgpu/vgpu/sysfs_vgpu.c
+++ /dev/null
@@ -1,55 +0,0 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#include <linux/device.h>
24
25#include "vgpu/vgpu.h"
26
27static ssize_t vgpu_load_show(struct device *dev,
28 struct device_attribute *attr,
29 char *buf)
30{
31 struct gk20a *g = get_gk20a(dev);
32 struct tegra_vgpu_cmd_msg msg = {0};
33 struct tegra_vgpu_gpu_load_params *p = &msg.params.gpu_load;
34 int err;
35
36 msg.cmd = TEGRA_VGPU_CMD_GET_GPU_LOAD;
37 msg.handle = vgpu_get_handle(g);
38 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
39 if (err)
40 return err;
41
42 return snprintf(buf, PAGE_SIZE, "%u\n", p->load);
43}
44static DEVICE_ATTR(load, S_IRUGO, vgpu_load_show, NULL);
45
46void vgpu_create_sysfs(struct device *dev)
47{
48 if (device_create_file(dev, &dev_attr_load))
49 dev_err(dev, "Failed to create vgpu sysfs attributes!\n");
50}
51
52void vgpu_remove_sysfs(struct device *dev)
53{
54 device_remove_file(dev, &dev_attr_load);
55}
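vgpu_load_show() is the only attribute registered here. If more attributes were added, the usual kernel pattern would be to collect them in an attribute_group rather than calling device_create_file() once per attribute; a hedged sketch of that alternative (names illustrative, not part of this commit):

/* Illustrative alternative: register the same attribute via a group. */
static struct attribute *vgpu_example_attrs[] = {
	&dev_attr_load.attr,
	NULL,
};

static const struct attribute_group vgpu_example_attr_group = {
	.attrs = vgpu_example_attrs,
};

/* vgpu_create_sysfs() would then reduce to:
 *	if (sysfs_create_group(&dev->kobj, &vgpu_example_attr_group))
 *		dev_err(dev, "Failed to create vgpu sysfs attributes!\n");
 */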
diff --git a/drivers/gpu/nvgpu/vgpu/tsg_vgpu.c b/drivers/gpu/nvgpu/vgpu/tsg_vgpu.c
deleted file mode 100644
index 683317dc..00000000
--- a/drivers/gpu/nvgpu/vgpu/tsg_vgpu.c
+++ /dev/null
@@ -1,142 +0,0 @@
1/*
2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#include <linux/tegra_vgpu.h>
24
25#include "gk20a/gk20a.h"
26#include "gk20a/channel_gk20a.h"
27#include "gk20a/tsg_gk20a.h"
28#include "common/linux/platform_gk20a.h"
29#include "vgpu.h"
30#include "fifo_vgpu.h"
31
32#include <nvgpu/bug.h>
33
34int vgpu_tsg_open(struct tsg_gk20a *tsg)
35{
36 struct tegra_vgpu_cmd_msg msg = {};
37 struct tegra_vgpu_tsg_open_params *p =
38 &msg.params.tsg_open;
39 int err;
40
41 gk20a_dbg_fn("");
42
43 msg.cmd = TEGRA_VGPU_CMD_TSG_OPEN;
44 msg.handle = vgpu_get_handle(tsg->g);
45 p->tsg_id = tsg->tsgid;
46 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
47 err = err ? err : msg.ret;
48 if (err) {
49 nvgpu_err(tsg->g,
50 "vgpu_tsg_open failed, tsgid %d", tsg->tsgid);
51 }
52
53 return err;
54}
55
56int vgpu_enable_tsg(struct tsg_gk20a *tsg)
57{
58 struct gk20a *g = tsg->g;
59 struct channel_gk20a *ch;
60
61 nvgpu_rwsem_down_read(&tsg->ch_list_lock);
62 nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry)
63 g->ops.fifo.enable_channel(ch);
64 nvgpu_rwsem_up_read(&tsg->ch_list_lock);
65
66 return 0;
67}
68
69int vgpu_tsg_bind_channel(struct tsg_gk20a *tsg,
70 struct channel_gk20a *ch)
71{
72 struct tegra_vgpu_cmd_msg msg = {};
73 struct tegra_vgpu_tsg_bind_unbind_channel_params *p =
74 &msg.params.tsg_bind_unbind_channel;
75 int err;
76
77 gk20a_dbg_fn("");
78
79 err = gk20a_tsg_bind_channel(tsg, ch);
80 if (err)
81 return err;
82
83 msg.cmd = TEGRA_VGPU_CMD_TSG_BIND_CHANNEL;
84 msg.handle = vgpu_get_handle(tsg->g);
85 p->tsg_id = tsg->tsgid;
86 p->ch_handle = ch->virt_ctx;
87 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
88 err = err ? err : msg.ret;
89 if (err) {
90 nvgpu_err(tsg->g,
91 "vgpu_tsg_bind_channel failed, ch %d tsgid %d",
92 ch->chid, tsg->tsgid);
93 gk20a_tsg_unbind_channel(ch);
94 }
95
96 return err;
97}
98
99int vgpu_tsg_unbind_channel(struct channel_gk20a *ch)
100{
101 struct tegra_vgpu_cmd_msg msg = {};
102 struct tegra_vgpu_tsg_bind_unbind_channel_params *p =
103 &msg.params.tsg_bind_unbind_channel;
104 int err;
105
106 gk20a_dbg_fn("");
107
108 err = gk20a_tsg_unbind_channel(ch);
109 if (err)
110 return err;
111
112 msg.cmd = TEGRA_VGPU_CMD_TSG_UNBIND_CHANNEL;
113 msg.handle = vgpu_get_handle(ch->g);
114 p->ch_handle = ch->virt_ctx;
115 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
116 err = err ? err : msg.ret;
117 WARN_ON(err);
118
119 return err;
120}
121
122int vgpu_tsg_set_timeslice(struct tsg_gk20a *tsg, u32 timeslice)
123{
124 struct tegra_vgpu_cmd_msg msg = {0};
125 struct tegra_vgpu_tsg_timeslice_params *p =
126 &msg.params.tsg_timeslice;
127 int err;
128
129 gk20a_dbg_fn("");
130
131 msg.cmd = TEGRA_VGPU_CMD_TSG_SET_TIMESLICE;
132 msg.handle = vgpu_get_handle(tsg->g);
133 p->tsg_id = tsg->tsgid;
134 p->timeslice_us = timeslice;
135 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
136 err = err ? err : msg.ret;
137 WARN_ON(err);
138 if (!err)
139 tsg->timeslice_us = timeslice;
140
141 return err;
142}
diff --git a/drivers/gpu/nvgpu/vgpu/vgpu.c b/drivers/gpu/nvgpu/vgpu/vgpu.c
deleted file mode 100644
index 1153b540..00000000
--- a/drivers/gpu/nvgpu/vgpu/vgpu.c
+++ /dev/null
@@ -1,782 +0,0 @@
1/*
2 * Virtualized GPU
3 *
4 * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include <linux/delay.h>
26#include <linux/dma-mapping.h>
27#include <linux/pm_runtime.h>
28#include <linux/pm_qos.h>
29#include <soc/tegra/chip-id.h>
30#include <uapi/linux/nvgpu.h>
31
32#include <nvgpu/kmem.h>
33#include <nvgpu/bug.h>
34#include <nvgpu/enabled.h>
35#include <nvgpu/debug.h>
36#include <nvgpu/bus.h>
37#include <nvgpu/soc.h>
38#include <nvgpu/ctxsw_trace.h>
39
40#include "vgpu/vgpu.h"
41#include "vgpu/fecs_trace_vgpu.h"
42#include "vgpu/clk_vgpu.h"
43#include "gk20a/tsg_gk20a.h"
44#include "gk20a/channel_gk20a.h"
45#include "gm20b/hal_gm20b.h"
46
47#include "common/linux/module.h"
48#include "common/linux/os_linux.h"
49#include "common/linux/ioctl.h"
50#include "common/linux/scale.h"
51#include "common/linux/driver_common.h"
52
53#ifdef CONFIG_TEGRA_19x_GPU
54#include <vgpu/vgpu_t19x.h>
55#include <nvgpu_gpuid_t19x.h>
56#endif
57
58#include <nvgpu/hw/gk20a/hw_mc_gk20a.h>
59
60static inline int vgpu_comm_init(struct platform_device *pdev)
61{
62 size_t queue_sizes[] = { TEGRA_VGPU_QUEUE_SIZES };
63
64 return tegra_gr_comm_init(pdev, TEGRA_GR_COMM_CTX_CLIENT, 3,
65 queue_sizes, TEGRA_VGPU_QUEUE_CMD,
66 ARRAY_SIZE(queue_sizes));
67}
68
69static inline void vgpu_comm_deinit(void)
70{
71 size_t queue_sizes[] = { TEGRA_VGPU_QUEUE_SIZES };
72
73 tegra_gr_comm_deinit(TEGRA_GR_COMM_CTX_CLIENT, TEGRA_VGPU_QUEUE_CMD,
74 ARRAY_SIZE(queue_sizes));
75}
76
77int vgpu_comm_sendrecv(struct tegra_vgpu_cmd_msg *msg, size_t size_in,
78 size_t size_out)
79{
80 void *handle;
81 size_t size = size_in;
82 void *data = msg;
83 int err;
84
85 err = tegra_gr_comm_sendrecv(TEGRA_GR_COMM_CTX_CLIENT,
86 tegra_gr_comm_get_server_vmid(),
87 TEGRA_VGPU_QUEUE_CMD, &handle, &data, &size);
88 if (!err) {
89 WARN_ON(size < size_out);
90 memcpy(msg, data, size_out);
91 tegra_gr_comm_release(handle);
92 }
93
94 return err;
95}
96
97static u64 vgpu_connect(void)
98{
99 struct tegra_vgpu_cmd_msg msg;
100 struct tegra_vgpu_connect_params *p = &msg.params.connect;
101 int err;
102
103 msg.cmd = TEGRA_VGPU_CMD_CONNECT;
104 p->module = TEGRA_VGPU_MODULE_GPU;
105 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
106
107 return (err || msg.ret) ? 0 : p->handle;
108}
109
110int vgpu_get_attribute(u64 handle, u32 attrib, u32 *value)
111{
112 struct tegra_vgpu_cmd_msg msg;
113 struct tegra_vgpu_attrib_params *p = &msg.params.attrib;
114 int err;
115
116 msg.cmd = TEGRA_VGPU_CMD_GET_ATTRIBUTE;
117 msg.handle = handle;
118 p->attrib = attrib;
119 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
120
121 if (err || msg.ret)
122 return -1;
123
124 *value = p->value;
125 return 0;
126}
127
128static void vgpu_handle_channel_event(struct gk20a *g,
129 struct tegra_vgpu_channel_event_info *info)
130{
131 if (info->id >= g->fifo.num_channels ||
132 info->event_id >= NVGPU_IOCTL_CHANNEL_EVENT_ID_MAX) {
133 nvgpu_err(g, "invalid channel event");
134 return;
135 }
136
137 if (info->is_tsg) {
138 struct tsg_gk20a *tsg = &g->fifo.tsg[info->id];
139
140 gk20a_tsg_event_id_post_event(tsg, info->event_id);
141 } else {
142 struct channel_gk20a *ch = &g->fifo.channel[info->id];
143
144 if (!gk20a_channel_get(ch)) {
145 nvgpu_err(g, "invalid channel %d for event %d",
146 (int)info->id, (int)info->event_id);
147 return;
148 }
149 gk20a_channel_event_id_post_event(ch, info->event_id);
150 gk20a_channel_put(ch);
151 }
152}
153
154
155
156static int vgpu_intr_thread(void *dev_id)
157{
158 struct gk20a *g = dev_id;
159 struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
160
161 while (true) {
162 struct tegra_vgpu_intr_msg *msg;
163 u32 sender;
164 void *handle;
165 size_t size;
166 int err;
167
168 err = tegra_gr_comm_recv(TEGRA_GR_COMM_CTX_CLIENT,
169 TEGRA_VGPU_QUEUE_INTR, &handle,
170 (void **)&msg, &size, &sender);
171 if (err == -ETIME)
172 continue;
173 if (WARN_ON(err))
174 continue;
175
176 if (msg->event == TEGRA_VGPU_EVENT_ABORT) {
177 tegra_gr_comm_release(handle);
178 break;
179 }
180
181 switch (msg->event) {
182 case TEGRA_VGPU_EVENT_INTR:
183 if (msg->unit == TEGRA_VGPU_INTR_GR)
184 vgpu_gr_isr(g, &msg->info.gr_intr);
185 else if (msg->unit == TEGRA_VGPU_NONSTALL_INTR_GR)
186 vgpu_gr_nonstall_isr(g,
187 &msg->info.gr_nonstall_intr);
188 else if (msg->unit == TEGRA_VGPU_INTR_FIFO)
189 vgpu_fifo_isr(g, &msg->info.fifo_intr);
190 else if (msg->unit == TEGRA_VGPU_NONSTALL_INTR_FIFO)
191 vgpu_fifo_nonstall_isr(g,
192 &msg->info.fifo_nonstall_intr);
193 else if (msg->unit == TEGRA_VGPU_NONSTALL_INTR_CE2)
194 vgpu_ce2_nonstall_isr(g,
195 &msg->info.ce2_nonstall_intr);
196 break;
197 case TEGRA_VGPU_EVENT_FECS_TRACE:
198 vgpu_fecs_trace_data_update(g);
199 break;
200 case TEGRA_VGPU_EVENT_CHANNEL:
201 vgpu_handle_channel_event(g, &msg->info.channel_event);
202 break;
203 case TEGRA_VGPU_EVENT_SM_ESR:
204 vgpu_gr_handle_sm_esr_event(g, &msg->info.sm_esr);
205 break;
206 default:
207 nvgpu_err(g, "unknown event %u", msg->event);
208 break;
209 }
210
211 tegra_gr_comm_release(handle);
212 }
213
214 while (!nvgpu_thread_should_stop(&priv->intr_handler))
215 msleep(10);
216 return 0;
217}
218
219static void vgpu_remove_support(struct gk20a *g)
220{
221 struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
222 struct vgpu_priv_data *priv =
223 vgpu_get_priv_data_from_dev(dev_from_gk20a(g));
224 struct tegra_vgpu_intr_msg msg;
225 int err;
226
227 if (g->dbg_regops_tmp_buf)
228 nvgpu_kfree(g, g->dbg_regops_tmp_buf);
229
230 if (g->pmu.remove_support)
231 g->pmu.remove_support(&g->pmu);
232
233 if (g->gr.remove_support)
234 g->gr.remove_support(&g->gr);
235
236 if (g->fifo.remove_support)
237 g->fifo.remove_support(&g->fifo);
238
239 if (g->mm.remove_support)
240 g->mm.remove_support(&g->mm);
241
242 msg.event = TEGRA_VGPU_EVENT_ABORT;
243 err = tegra_gr_comm_send(TEGRA_GR_COMM_CTX_CLIENT,
244 TEGRA_GR_COMM_ID_SELF, TEGRA_VGPU_QUEUE_INTR,
245 &msg, sizeof(msg));
246 WARN_ON(err);
247 nvgpu_thread_stop(&priv->intr_handler);
248
249	/* free mappings to registers, etc. */
250
251 if (l->bar1) {
252 iounmap(l->bar1);
253 l->bar1 = NULL;
254 }
255}
256
257static void vgpu_init_vars(struct gk20a *g, struct gk20a_platform *platform)
258{
259 struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
260
261 nvgpu_mutex_init(&g->poweron_lock);
262 nvgpu_mutex_init(&g->poweroff_lock);
263 l->regs_saved = l->regs;
264 l->bar1_saved = l->bar1;
265
266 nvgpu_init_list_node(&g->pending_sema_waits);
267 nvgpu_raw_spinlock_init(&g->pending_sema_waits_lock);
268
269 g->aggressive_sync_destroy = platform->aggressive_sync_destroy;
270 g->aggressive_sync_destroy_thresh = platform->aggressive_sync_destroy_thresh;
271 g->has_syncpoints = platform->has_syncpoints;
272 g->ptimer_src_freq = platform->ptimer_src_freq;
273 g->can_railgate = platform->can_railgate_init;
274 g->railgate_delay = platform->railgate_delay_init;
275
276 __nvgpu_set_enabled(g, NVGPU_MM_UNIFY_ADDRESS_SPACES,
277 platform->unify_address_spaces);
278}
279
280static int vgpu_init_support(struct platform_device *pdev)
281{
282 struct resource *r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
283 struct gk20a *g = get_gk20a(&pdev->dev);
284 struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
285 void __iomem *regs;
286 int err = 0;
287
288 if (!r) {
289 nvgpu_err(g, "failed to get gk20a bar1");
290 err = -ENXIO;
291 goto fail;
292 }
293
294 if (r->name && !strcmp(r->name, "/vgpu")) {
295 regs = devm_ioremap_resource(&pdev->dev, r);
296 if (IS_ERR(regs)) {
297 nvgpu_err(g, "failed to remap gk20a bar1");
298 err = PTR_ERR(regs);
299 goto fail;
300 }
301 l->bar1 = regs;
302 l->bar1_mem = r;
303 }
304
305 nvgpu_mutex_init(&g->dbg_sessions_lock);
306 nvgpu_mutex_init(&g->client_lock);
307
308 nvgpu_init_list_node(&g->profiler_objects);
309
310 g->dbg_regops_tmp_buf = nvgpu_kzalloc(g, SZ_4K);
311 if (!g->dbg_regops_tmp_buf) {
312 nvgpu_err(g, "couldn't allocate regops tmp buf");
313 return -ENOMEM;
314 }
315 g->dbg_regops_tmp_buf_ops =
316 SZ_4K / sizeof(g->dbg_regops_tmp_buf[0]);
317
318 g->remove_support = vgpu_remove_support;
319 return 0;
320
321 fail:
322 vgpu_remove_support(g);
323 return err;
324}
325
326int vgpu_pm_prepare_poweroff(struct device *dev)
327{
328 struct gk20a *g = get_gk20a(dev);
329 int ret = 0;
330
331 gk20a_dbg_fn("");
332
333 if (!g->power_on)
334 return 0;
335
336 ret = gk20a_channel_suspend(g);
337 if (ret)
338 return ret;
339
340 g->power_on = false;
341
342 return ret;
343}
344
345static void vgpu_detect_chip(struct gk20a *g)
346{
347 struct nvgpu_gpu_params *p = &g->params;
348 struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
349
350 p->gpu_arch = priv->constants.arch;
351 p->gpu_impl = priv->constants.impl;
352 p->gpu_rev = priv->constants.rev;
353
354 gk20a_dbg_info("arch: %x, impl: %x, rev: %x\n",
355 p->gpu_arch,
356 p->gpu_impl,
357 p->gpu_rev);
358}
359
360int vgpu_init_gpu_characteristics(struct gk20a *g)
361{
362 int err;
363
364 gk20a_dbg_fn("");
365
366 err = gk20a_init_gpu_characteristics(g);
367 if (err)
368 return err;
369
370 __nvgpu_set_enabled(g, NVGPU_SUPPORT_MAP_BUFFER_BATCH, false);
371
372 /* features vgpu does not support */
373 __nvgpu_set_enabled(g, NVGPU_SUPPORT_RESCHEDULE_RUNLIST, false);
374
375 return 0;
376}
377
378int vgpu_read_ptimer(struct gk20a *g, u64 *value)
379{
380 struct tegra_vgpu_cmd_msg msg = {0};
381 struct tegra_vgpu_read_ptimer_params *p = &msg.params.read_ptimer;
382 int err;
383
384 gk20a_dbg_fn("");
385
386 msg.cmd = TEGRA_VGPU_CMD_READ_PTIMER;
387 msg.handle = vgpu_get_handle(g);
388
389 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
390 err = err ? err : msg.ret;
391 if (!err)
392 *value = p->time;
393 else
394 nvgpu_err(g, "vgpu read ptimer failed, err=%d", err);
395
396 return err;
397}
398
399int vgpu_get_timestamps_zipper(struct gk20a *g,
400 u32 source_id, u32 count,
401 struct nvgpu_cpu_time_correlation_sample *samples)
402{
403 struct tegra_vgpu_cmd_msg msg = {0};
404 struct tegra_vgpu_get_timestamps_zipper_params *p =
405 &msg.params.get_timestamps_zipper;
406 int err;
407 u32 i;
408
409 gk20a_dbg_fn("");
410
411 if (count > TEGRA_VGPU_GET_TIMESTAMPS_ZIPPER_MAX_COUNT) {
412 nvgpu_err(g, "count %u overflow", count);
413 return -EINVAL;
414 }
415
416 msg.cmd = TEGRA_VGPU_CMD_GET_TIMESTAMPS_ZIPPER;
417 msg.handle = vgpu_get_handle(g);
418 p->source_id = TEGRA_VGPU_GET_TIMESTAMPS_ZIPPER_SRC_ID_TSC;
419 p->count = count;
420
421 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
422 err = err ? err : msg.ret;
423 if (err) {
424 nvgpu_err(g, "vgpu get timestamps zipper failed, err=%d", err);
425 return err;
426 }
427
428 for (i = 0; i < count; i++) {
429 samples[i].cpu_timestamp = p->samples[i].cpu_timestamp;
430 samples[i].gpu_timestamp = p->samples[i].gpu_timestamp;
431 }
432
433 return err;
434}
435
436static int vgpu_init_hal(struct gk20a *g)
437{
438 u32 ver = g->params.gpu_arch + g->params.gpu_impl;
439 int err;
440
441 switch (ver) {
442 case GK20A_GPUID_GM20B:
443 case GK20A_GPUID_GM20B_B:
444 gk20a_dbg_info("gm20b detected");
445 err = vgpu_gm20b_init_hal(g);
446 break;
447 case NVGPU_GPUID_GP10B:
448 gk20a_dbg_info("gp10b detected");
449 err = vgpu_gp10b_init_hal(g);
450 break;
451#ifdef CONFIG_TEGRA_19x_GPU
452 case TEGRA_19x_GPUID:
453 err = vgpu_t19x_init_hal(g);
454 break;
455#endif
456 default:
457 nvgpu_err(g, "no support for %x", ver);
458 err = -ENODEV;
459 break;
460 }
461
462 return err;
463}
464
465int vgpu_pm_finalize_poweron(struct device *dev)
466{
467 struct gk20a *g = get_gk20a(dev);
468 int err;
469
470 gk20a_dbg_fn("");
471
472 if (g->power_on)
473 return 0;
474
475 g->power_on = true;
476
477 vgpu_detect_chip(g);
478 err = vgpu_init_hal(g);
479 if (err)
480 goto done;
481
482 if (g->ops.ltc.init_fs_state)
483 g->ops.ltc.init_fs_state(g);
484
485 err = vgpu_init_mm_support(g);
486 if (err) {
487 nvgpu_err(g, "failed to init gk20a mm");
488 goto done;
489 }
490
491 err = vgpu_init_fifo_support(g);
492 if (err) {
493 nvgpu_err(g, "failed to init gk20a fifo");
494 goto done;
495 }
496
497 err = vgpu_init_gr_support(g);
498 if (err) {
499 nvgpu_err(g, "failed to init gk20a gr");
500 goto done;
501 }
502
503 err = g->ops.chip_init_gpu_characteristics(g);
504 if (err) {
505 nvgpu_err(g, "failed to init gk20a gpu characteristics");
506 goto done;
507 }
508
509 gk20a_ctxsw_trace_init(g);
510 gk20a_sched_ctrl_init(g);
511 gk20a_channel_resume(g);
512
513done:
514 return err;
515}
516
517static int vgpu_qos_notify(struct notifier_block *nb,
518 unsigned long n, void *data)
519{
520 struct gk20a_scale_profile *profile =
521 container_of(nb, struct gk20a_scale_profile,
522 qos_notify_block);
523 struct gk20a *g = get_gk20a(profile->dev);
524 u32 max_freq;
525 int err;
526
527 gk20a_dbg_fn("");
528
529 max_freq = (u32)pm_qos_read_max_bound(PM_QOS_GPU_FREQ_BOUNDS);
530 err = vgpu_clk_cap_rate(profile->dev, max_freq);
531 if (err)
532 nvgpu_err(g, "%s failed, err=%d", __func__, err);
533
534	return NOTIFY_OK; /* let further calls down the notifier chain proceed */
535}
536
537static int vgpu_pm_qos_init(struct device *dev)
538{
539 struct gk20a *g = get_gk20a(dev);
540 struct gk20a_scale_profile *profile = g->scale_profile;
541
542 if (IS_ENABLED(CONFIG_GK20A_DEVFREQ)) {
543 if (!profile)
544 return -EINVAL;
545 } else {
546 profile = nvgpu_kzalloc(g, sizeof(*profile));
547 if (!profile)
548 return -ENOMEM;
549 g->scale_profile = profile;
550 }
551
552 profile->dev = dev;
553 profile->qos_notify_block.notifier_call = vgpu_qos_notify;
554 pm_qos_add_max_notifier(PM_QOS_GPU_FREQ_BOUNDS,
555 &profile->qos_notify_block);
556 return 0;
557}
558
559static void vgpu_pm_qos_remove(struct device *dev)
560{
561 struct gk20a *g = get_gk20a(dev);
562
563 pm_qos_remove_max_notifier(PM_QOS_GPU_FREQ_BOUNDS,
564 &g->scale_profile->qos_notify_block);
565 nvgpu_kfree(g, g->scale_profile);
566 g->scale_profile = NULL;
567}
568
569static int vgpu_pm_init(struct device *dev)
570{
571 struct gk20a *g = get_gk20a(dev);
572 struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
573 unsigned long *freqs;
574 int num_freqs;
575 int err = 0;
576
577 gk20a_dbg_fn("");
578
579 if (nvgpu_platform_is_simulation(g))
580 return 0;
581
582 __pm_runtime_disable(dev, false);
583
584 if (IS_ENABLED(CONFIG_GK20A_DEVFREQ))
585 gk20a_scale_init(dev);
586
587 if (l->devfreq) {
588 /* set min/max frequency based on frequency table */
589 err = vgpu_clk_get_freqs(dev, &freqs, &num_freqs);
590 if (err)
591 return err;
592
593 if (num_freqs < 1)
594 return -EINVAL;
595
596 l->devfreq->min_freq = freqs[0];
597 l->devfreq->max_freq = freqs[num_freqs - 1];
598 }
599
600 err = vgpu_pm_qos_init(dev);
601 if (err)
602 return err;
603
604 return err;
605}
606
607static int vgpu_get_constants(struct gk20a *g)
608{
609 struct tegra_vgpu_cmd_msg msg = {};
610 struct tegra_vgpu_constants_params *p = &msg.params.constants;
611 struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
612 int err;
613
614 gk20a_dbg_fn("");
615
616 msg.cmd = TEGRA_VGPU_CMD_GET_CONSTANTS;
617 msg.handle = vgpu_get_handle(g);
618 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
619 err = err ? err : msg.ret;
620
621 if (unlikely(err)) {
622 nvgpu_err(g, "%s failed, err=%d", __func__, err);
623 return err;
624 }
625
626 if (unlikely(p->gpc_count > TEGRA_VGPU_MAX_GPC_COUNT ||
627 p->max_tpc_per_gpc_count > TEGRA_VGPU_MAX_TPC_COUNT_PER_GPC)) {
628 nvgpu_err(g, "gpc_count %d max_tpc_per_gpc %d overflow",
629 (int)p->gpc_count, (int)p->max_tpc_per_gpc_count);
630 return -EINVAL;
631 }
632
633 priv->constants = *p;
634 return 0;
635}
636
637int vgpu_probe(struct platform_device *pdev)
638{
639 struct nvgpu_os_linux *l;
640 struct gk20a *gk20a;
641 int err;
642 struct device *dev = &pdev->dev;
643 struct gk20a_platform *platform = gk20a_get_platform(dev);
644 struct vgpu_priv_data *priv;
645
646 if (!platform) {
647 dev_err(dev, "no platform data\n");
648 return -ENODATA;
649 }
650
651 gk20a_dbg_fn("");
652
653 l = kzalloc(sizeof(*l), GFP_KERNEL);
654 if (!l) {
655 dev_err(dev, "couldn't allocate gk20a support");
656 return -ENOMEM;
657 }
658 gk20a = &l->g;
659 nvgpu_init_gk20a(gk20a);
660
661 nvgpu_kmem_init(gk20a);
662
663 err = nvgpu_init_enabled_flags(gk20a);
664 if (err) {
665 kfree(gk20a);
666 return err;
667 }
668
669 l->dev = dev;
670 if (tegra_platform_is_vdk())
671 __nvgpu_set_enabled(gk20a, NVGPU_IS_FMODEL, true);
672
673 gk20a->is_virtual = true;
674
675 priv = nvgpu_kzalloc(gk20a, sizeof(*priv));
676 if (!priv) {
677 kfree(gk20a);
678 return -ENOMEM;
679 }
680
681 platform->g = gk20a;
682 platform->vgpu_priv = priv;
683
684 err = gk20a_user_init(dev, INTERFACE_NAME, &nvgpu_class);
685 if (err)
686 return err;
687
688 vgpu_init_support(pdev);
689
690 vgpu_init_vars(gk20a, platform);
691
692 init_rwsem(&l->busy_lock);
693
694 nvgpu_spinlock_init(&gk20a->mc_enable_lock);
695
696 gk20a->ch_wdt_timeout_ms = platform->ch_wdt_timeout_ms;
697
698 /* Initialize the platform interface. */
699 err = platform->probe(dev);
700 if (err) {
701 if (err == -EPROBE_DEFER)
702 dev_info(dev, "platform probe failed");
703 else
704 dev_err(dev, "platform probe failed");
705 return err;
706 }
707
708 if (platform->late_probe) {
709 err = platform->late_probe(dev);
710 if (err) {
711 dev_err(dev, "late probe failed");
712 return err;
713 }
714 }
715
716 err = vgpu_comm_init(pdev);
717 if (err) {
718 dev_err(dev, "failed to init comm interface\n");
719 return -ENOSYS;
720 }
721
722 priv->virt_handle = vgpu_connect();
723 if (!priv->virt_handle) {
724 dev_err(dev, "failed to connect to server node\n");
725 vgpu_comm_deinit();
726 return -ENOSYS;
727 }
728
729 err = vgpu_get_constants(gk20a);
730 if (err) {
731 vgpu_comm_deinit();
732 return err;
733 }
734
735 err = vgpu_pm_init(dev);
736 if (err) {
737 dev_err(dev, "pm init failed");
738 return err;
739 }
740
741 err = nvgpu_thread_create(&priv->intr_handler, gk20a,
742 vgpu_intr_thread, "gk20a");
743 if (err)
744 return err;
745
746 gk20a_debug_init(gk20a, "gpu.0");
747
748 /* Set DMA parameters to allow larger sgt lists */
749 dev->dma_parms = &l->dma_parms;
750 dma_set_max_seg_size(dev, UINT_MAX);
751
752 gk20a->gr_idle_timeout_default =
753 CONFIG_GK20A_DEFAULT_TIMEOUT;
754 gk20a->timeouts_enabled = true;
755
756 vgpu_create_sysfs(dev);
757 gk20a_init_gr(gk20a);
758
759 nvgpu_ref_init(&gk20a->refcount);
760
761 return 0;
762}
763
764int vgpu_remove(struct platform_device *pdev)
765{
766 struct device *dev = &pdev->dev;
767 struct gk20a *g = get_gk20a(dev);
768 gk20a_dbg_fn("");
769
770 vgpu_pm_qos_remove(dev);
771 if (g->remove_support)
772 g->remove_support(g);
773
774 vgpu_comm_deinit();
775 gk20a_sched_ctrl_cleanup(g);
776 gk20a_user_deinit(dev, &nvgpu_class);
777 vgpu_remove_sysfs(dev);
778 gk20a_get_platform(dev)->g = NULL;
779 gk20a_put(g);
780
781 return 0;
782}
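vgpu_probe(), vgpu_remove() and the two pm callbacks above are the entry points the Linux platform bus and pm core call into; in this tree the actual glue lives under common/linux/. A stripped-down sketch of how such callbacks are typically wired, with the driver name and structure purely illustrative:

/* Illustrative wiring only; the real module glue is under common/linux/. */
static const struct dev_pm_ops vgpu_example_pm_ops = {
	.suspend = vgpu_pm_prepare_poweroff,
	.resume = vgpu_pm_finalize_poweron,
};

static struct platform_driver vgpu_example_driver = {
	.probe = vgpu_probe,
	.remove = vgpu_remove,
	.driver = {
		.name = "nvgpu-vgpu-example",
		.pm = &vgpu_example_pm_ops,
	},
};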
diff --git a/drivers/gpu/nvgpu/vgpu/vgpu.h b/drivers/gpu/nvgpu/vgpu/vgpu.h
deleted file mode 100644
index dcfbddf2..00000000
--- a/drivers/gpu/nvgpu/vgpu/vgpu.h
+++ /dev/null
@@ -1,194 +0,0 @@
1/*
2 * Virtualized GPU Interfaces
3 *
4 * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#ifndef _VIRT_H_
26#define _VIRT_H_
27
28struct device;
29struct tegra_vgpu_gr_intr_info;
30struct tegra_vgpu_fifo_intr_info;
31struct tegra_vgpu_cmd_msg;
32struct gk20a_platform;
33
34#ifdef CONFIG_TEGRA_GR_VIRTUALIZATION
35#include <linux/tegra_gr_comm.h>
36#include <linux/tegra_vgpu.h>
37#include "gk20a/gk20a.h"
38#include "common/linux/platform_gk20a.h"
39#include "common/linux/os_linux.h"
40
41#include <nvgpu/thread.h>
42
43struct vgpu_priv_data {
44 u64 virt_handle;
45 struct nvgpu_thread intr_handler;
46 struct tegra_vgpu_constants_params constants;
47};
48
49static inline
50struct vgpu_priv_data *vgpu_get_priv_data_from_dev(struct device *dev)
51{
52 struct gk20a_platform *plat = gk20a_get_platform(dev);
53
54 return (struct vgpu_priv_data *)plat->vgpu_priv;
55}
56
57static inline struct vgpu_priv_data *vgpu_get_priv_data(struct gk20a *g)
58{
59 return vgpu_get_priv_data_from_dev(dev_from_gk20a(g));
60}
61
62static inline u64 vgpu_get_handle_from_dev(struct device *dev)
63{
64 struct vgpu_priv_data *priv = vgpu_get_priv_data_from_dev(dev);
65
66 if (unlikely(!priv)) {
67 dev_err(dev, "invalid vgpu_priv_data in %s\n", __func__);
68 return INT_MAX;
69 }
70
71 return priv->virt_handle;
72}
73
74static inline u64 vgpu_get_handle(struct gk20a *g)
75{
76 return vgpu_get_handle_from_dev(dev_from_gk20a(g));
77}
78
79int vgpu_pm_prepare_poweroff(struct device *dev);
80int vgpu_pm_finalize_poweron(struct device *dev);
81int vgpu_probe(struct platform_device *dev);
82int vgpu_remove(struct platform_device *dev);
83u64 vgpu_bar1_map(struct gk20a *g, struct sg_table **sgt, u64 size);
84int vgpu_gr_isr(struct gk20a *g, struct tegra_vgpu_gr_intr_info *info);
85int vgpu_gr_nonstall_isr(struct gk20a *g,
86 struct tegra_vgpu_gr_nonstall_intr_info *info);
87int vgpu_gr_alloc_gr_ctx(struct gk20a *g,
88 struct gr_ctx_desc **__gr_ctx,
89 struct vm_gk20a *vm,
90 u32 class,
91 u32 flags);
92void vgpu_gr_free_gr_ctx(struct gk20a *g, struct vm_gk20a *vm,
93 struct gr_ctx_desc *gr_ctx);
94void vgpu_gr_handle_sm_esr_event(struct gk20a *g,
95 struct tegra_vgpu_sm_esr_info *info);
96int vgpu_gr_init_ctx_state(struct gk20a *g);
97int vgpu_fifo_isr(struct gk20a *g, struct tegra_vgpu_fifo_intr_info *info);
98int vgpu_fifo_nonstall_isr(struct gk20a *g,
99 struct tegra_vgpu_fifo_nonstall_intr_info *info);
100int vgpu_ce2_nonstall_isr(struct gk20a *g,
101 struct tegra_vgpu_ce2_nonstall_intr_info *info);
102u32 vgpu_ce_get_num_pce(struct gk20a *g);
103int vgpu_init_mm_support(struct gk20a *g);
104int vgpu_init_gr_support(struct gk20a *g);
105int vgpu_init_fifo_support(struct gk20a *g);
106
107int vgpu_get_attribute(u64 handle, u32 attrib, u32 *value);
108int vgpu_comm_sendrecv(struct tegra_vgpu_cmd_msg *msg, size_t size_in,
109 size_t size_out);
110
111int vgpu_gm20b_init_hal(struct gk20a *g);
112int vgpu_gp10b_init_hal(struct gk20a *g);
113
114int vgpu_init_gpu_characteristics(struct gk20a *g);
115
116void vgpu_create_sysfs(struct device *dev);
117void vgpu_remove_sysfs(struct device *dev);
118int vgpu_read_ptimer(struct gk20a *g, u64 *value);
119int vgpu_get_timestamps_zipper(struct gk20a *g,
120 u32 source_id, u32 count,
121 struct nvgpu_cpu_time_correlation_sample *samples);
122#else
123static inline int vgpu_pm_prepare_poweroff(struct device *dev)
124{
125 return -ENOSYS;
126}
127static inline int vgpu_pm_finalize_poweron(struct device *dev)
128{
129 return -ENOSYS;
130}
131static inline int vgpu_probe(struct platform_device *dev)
132{
133 return -ENOSYS;
134}
135static inline int vgpu_remove(struct platform_device *dev)
136{
137 return -ENOSYS;
138}
139static inline u64 vgpu_bar1_map(struct gk20a *g, struct sg_table **sgt,
140 u64 size)
141{
142 return 0;
143}
144static inline int vgpu_gr_isr(struct gk20a *g,
145 struct tegra_vgpu_gr_intr_info *info)
146{
147 return 0;
148}
149static inline int vgpu_gr_alloc_gr_ctx(struct gk20a *g,
150 struct gr_ctx_desc **__gr_ctx,
151 struct vm_gk20a *vm,
152 u32 class,
153 u32 flags)
154{
155 return -ENOSYS;
156}
157static inline void vgpu_gr_free_gr_ctx(struct gk20a *g, struct vm_gk20a *vm,
158 struct gr_ctx_desc *gr_ctx)
159{
160}
161static inline int vgpu_gr_init_ctx_state(struct gk20a *g)
162{
163 return -ENOSYS;
164}
165static inline int vgpu_fifo_isr(struct gk20a *g,
166 struct tegra_vgpu_fifo_intr_info *info)
167{
168 return 0;
169}
170static inline int vgpu_init_mm_support(struct gk20a *g)
171{
172 return -ENOSYS;
173}
174static inline int vgpu_init_gr_support(struct gk20a *g)
175{
176 return -ENOSYS;
177}
178static inline int vgpu_init_fifo_support(struct gk20a *g)
179{
180 return -ENOSYS;
181}
182
183static inline int vgpu_get_attribute(u64 handle, u32 attrib, u32 *value)
184{
185 return -ENOSYS;
186}
187static inline int vgpu_comm_sendrecv(struct tegra_vgpu_cmd_msg *msg, size_t size_in,
188 size_t size_out)
189{
190 return -ENOSYS;
191}
192#endif
193
194#endif
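When CONFIG_TEGRA_GR_VIRTUALIZATION is disabled, the #else branch above stubs every entry point (returning -ENOSYS or doing nothing), so call sites compile unchanged in both configurations. A hypothetical caller showing why no #ifdef is needed at the call site:

/* Hypothetical call site: builds identically with or without
 * CONFIG_TEGRA_GR_VIRTUALIZATION; the stub simply returns -ENOSYS. */
static int example_try_vgpu_probe(struct platform_device *pdev)
{
	int err = vgpu_probe(pdev);

	if (err == -ENOSYS)
		pr_info("vgpu support not built in\n");

	return err;
}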
diff --git a/drivers/gpu/nvgpu/vgpu/vgpu_t19x.h b/drivers/gpu/nvgpu/vgpu/vgpu_t19x.h
deleted file mode 100644
index 8c020f80..00000000
--- a/drivers/gpu/nvgpu/vgpu/vgpu_t19x.h
+++ /dev/null
@@ -1,36 +0,0 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef _VGPU_T19X_H_
24#define _VGPU_T19X_H_
25
26struct gk20a;
27
28int vgpu_gv11b_init_hal(struct gk20a *g);
29
30#define vgpu_t19x_init_hal(g) vgpu_gv11b_init_hal(g)
31
32#define TEGRA_19x_VGPU_COMPAT_TEGRA "nvidia,gv11b-vgpu"
33extern struct gk20a_platform gv11b_vgpu_tegra_platform;
34#define t19x_vgpu_tegra_platform gv11b_vgpu_tegra_platform
35
36#endif
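TEGRA_19x_VGPU_COMPAT_TEGRA is the device-tree compatible string for the gv11b vgpu platform, and t19x_vgpu_tegra_platform aliases the platform data it selects. A hedged sketch of how such a pair is typically consumed in an of_device_id match table (this table is illustrative, not the one in the tree):

/* Illustrative match table consuming the definitions above. */
static const struct of_device_id t19x_vgpu_example_of_match[] = {
	{
		.compatible = TEGRA_19x_VGPU_COMPAT_TEGRA,
		.data = &t19x_vgpu_tegra_platform,
	},
	{ },
};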