author     Richard Zhao <rizhao@nvidia.com>                     2018-01-30 02:24:37 -0500
committer  mobile promotions <svcmobile_promotions@nvidia.com>  2018-02-27 17:30:52 -0500
commit     6393eddfa996fba03464f897b85aa5ec79860fed (patch)
tree       557ebe9be93e2b0464118e7d8ec019d9d5dbae5f /drivers/gpu/nvgpu/common/linux/vgpu/mm_vgpu.c
parent     7932568b7fe9e16b2b83bc58b2b3686c0d5e52d4 (diff)
gpu: nvgpu: vgpu: move common files out of linux folder
Most of the files have been moved out of the linux folder. More code can
become common as HAL-ification goes on.

Jira EVLR-2364

Change-Id: Ia9dbdbc82f45ceefe5c788eac7517000cd455d5e
Signed-off-by: Richard Zhao <rizhao@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1649947
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/common/linux/vgpu/mm_vgpu.c')
-rw-r--r--  drivers/gpu/nvgpu/common/linux/vgpu/mm_vgpu.c  272
1 file changed, 0 insertions, 272 deletions
diff --git a/drivers/gpu/nvgpu/common/linux/vgpu/mm_vgpu.c b/drivers/gpu/nvgpu/common/linux/vgpu/mm_vgpu.c
deleted file mode 100644
index db3d6d03..00000000
--- a/drivers/gpu/nvgpu/common/linux/vgpu/mm_vgpu.c
+++ /dev/null
@@ -1,272 +0,0 @@
/*
 * Virtualized GPU Memory Management
 *
 * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <nvgpu/kmem.h>
#include <nvgpu/dma.h>
#include <nvgpu/bug.h>
#include <nvgpu/vm.h>
#include <nvgpu/vm_area.h>

#include <nvgpu/vgpu/vm.h>

#include <nvgpu/linux/vm.h>
#include <nvgpu/linux/nvgpu_mem.h>

#include "vgpu.h"
#include "mm_vgpu.h"
#include "gk20a/gk20a.h"
#include "gk20a/mm_gk20a.h"
#include "gm20b/mm_gm20b.h"

static int vgpu_init_mm_setup_sw(struct gk20a *g)
{
	struct mm_gk20a *mm = &g->mm;

	gk20a_dbg_fn("");

	if (mm->sw_ready) {
		gk20a_dbg_fn("skip init");
		return 0;
	}

	nvgpu_mutex_init(&mm->tlb_lock);
	nvgpu_mutex_init(&mm->priv_lock);

	mm->g = g;

	/* TBD: make channel vm size configurable */
	mm->channel.user_size = NV_MM_DEFAULT_USER_SIZE;
	mm->channel.kernel_size = NV_MM_DEFAULT_KERNEL_SIZE;

	gk20a_dbg_info("channel vm size: user %dMB kernel %dMB",
		       (int)(mm->channel.user_size >> 20),
		       (int)(mm->channel.kernel_size >> 20));

	mm->sw_ready = true;

	return 0;
}

int vgpu_init_mm_support(struct gk20a *g)
{
	int err;

	gk20a_dbg_fn("");

	err = vgpu_init_mm_setup_sw(g);
	if (err)
		return err;

	if (g->ops.mm.init_mm_setup_hw)
		err = g->ops.mm.init_mm_setup_hw(g);

	return err;
}

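/*
 * vGPU counterpart of the locked GMMU unmap path: free the VA allocation
 * locally (when this mapping owned it) and ask the RM server to remove the
 * GMMU PTEs via a TEGRA_VGPU_CMD_AS_UNMAP message. TLB invalidation is
 * handled on the server side.
 */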
void vgpu_locked_gmmu_unmap(struct vm_gk20a *vm,
			    u64 vaddr,
			    u64 size,
			    int pgsz_idx,
			    bool va_allocated,
			    int rw_flag,
			    bool sparse,
			    struct vm_gk20a_mapping_batch *batch)
{
	struct gk20a *g = gk20a_from_vm(vm);
	struct tegra_vgpu_cmd_msg msg;
	struct tegra_vgpu_as_map_params *p = &msg.params.as_map;
	int err;

	gk20a_dbg_fn("");

	if (va_allocated) {
		err = __nvgpu_vm_free_va(vm, vaddr, pgsz_idx);
		if (err) {
			nvgpu_err(g, "failed to free va");
			return;
		}
	}

	msg.cmd = TEGRA_VGPU_CMD_AS_UNMAP;
	msg.handle = vgpu_get_handle(g);
	p->handle = vm->handle;
	p->gpu_va = vaddr;
	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
	if (err || msg.ret)
		nvgpu_err(g, "failed to update gmmu ptes on unmap");

	/* TLB invalidate handled on server side */
}

/*
 * This is called by the common VM init routine to handle vGPU specifics of
 * initializing a VM on a vGPU. This alone is not enough to init a VM. See
 * nvgpu_vm_init().
 */
int vgpu_vm_init(struct gk20a *g, struct vm_gk20a *vm)
{
	struct tegra_vgpu_cmd_msg msg;
	struct tegra_vgpu_as_share_params *p = &msg.params.as_share;
	int err;

	msg.cmd = TEGRA_VGPU_CMD_AS_ALLOC_SHARE;
	msg.handle = vgpu_get_handle(g);
	p->size = vm->va_limit;
	p->big_page_size = vm->big_page_size;

	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
	if (err || msg.ret)
		return -ENOMEM;

	vm->handle = p->handle;

	return 0;
}

/*
 * Similar to vgpu_vm_init() this is called as part of the cleanup path for
 * VMs. This alone is not enough to remove a VM - see nvgpu_vm_remove().
 */
void vgpu_vm_remove(struct vm_gk20a *vm)
{
	struct gk20a *g = gk20a_from_vm(vm);
	struct tegra_vgpu_cmd_msg msg;
	struct tegra_vgpu_as_share_params *p = &msg.params.as_share;
	int err;

	msg.cmd = TEGRA_VGPU_CMD_AS_FREE_SHARE;
	msg.handle = vgpu_get_handle(g);
	p->handle = vm->handle;
	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
	WARN_ON(err || msg.ret);
}

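/*
 * Map a buffer into BAR1 by asking the RM server to perform the mapping
 * (TEGRA_VGPU_CMD_MAP_BAR1). Returns the resulting GPU virtual address,
 * or 0 if the request failed.
 */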
u64 vgpu_bar1_map(struct gk20a *g, struct nvgpu_mem *mem)
{
	u64 addr = nvgpu_mem_get_addr(g, mem);
	struct tegra_vgpu_cmd_msg msg;
	struct tegra_vgpu_as_map_params *p = &msg.params.as_map;
	int err;

	msg.cmd = TEGRA_VGPU_CMD_MAP_BAR1;
	msg.handle = vgpu_get_handle(g);
	p->addr = addr;
	p->size = mem->size;
	p->iova = 0;
	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
	if (err || msg.ret)
		addr = 0;
	else
		addr = p->gpu_va;

	return addr;
}

int vgpu_vm_bind_channel(struct gk20a_as_share *as_share,
			 struct channel_gk20a *ch)
{
	struct vm_gk20a *vm = as_share->vm;
	struct tegra_vgpu_cmd_msg msg;
	struct tegra_vgpu_as_bind_share_params *p = &msg.params.as_bind_share;
	int err;

	gk20a_dbg_fn("");

	ch->vm = vm;
	msg.cmd = TEGRA_VGPU_CMD_AS_BIND_SHARE;
	msg.handle = vgpu_get_handle(ch->g);
	p->as_handle = vm->handle;
	p->chan_handle = ch->virt_ctx;
	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));

	if (err || msg.ret) {
		ch->vm = NULL;
		err = -ENOMEM;
	}

	if (ch->vm)
		nvgpu_vm_get(ch->vm);

	return err;
}

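/*
 * Common helper for cache maintenance requests: the actual FB flush and
 * L2 flush/invalidate operations are executed by the RM server on behalf
 * of the vGPU client (TEGRA_VGPU_CMD_CACHE_MAINT).
 */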
static void vgpu_cache_maint(u64 handle, u8 op)
{
	struct tegra_vgpu_cmd_msg msg;
	struct tegra_vgpu_cache_maint_params *p = &msg.params.cache_maint;
	int err;

	msg.cmd = TEGRA_VGPU_CMD_CACHE_MAINT;
	msg.handle = handle;
	p->op = op;
	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
	WARN_ON(err || msg.ret);
}

int vgpu_mm_fb_flush(struct gk20a *g)
{
	gk20a_dbg_fn("");

	vgpu_cache_maint(vgpu_get_handle(g), TEGRA_VGPU_FB_FLUSH);
	return 0;
}

void vgpu_mm_l2_invalidate(struct gk20a *g)
{
	gk20a_dbg_fn("");

	vgpu_cache_maint(vgpu_get_handle(g), TEGRA_VGPU_L2_MAINT_INV);
}

void vgpu_mm_l2_flush(struct gk20a *g, bool invalidate)
{
	u8 op;

	gk20a_dbg_fn("");

	if (invalidate)
		op = TEGRA_VGPU_L2_MAINT_FLUSH_INV;
	else
		op = TEGRA_VGPU_L2_MAINT_FLUSH;

	vgpu_cache_maint(vgpu_get_handle(g), op);
}

void vgpu_mm_tlb_invalidate(struct gk20a *g, struct nvgpu_mem *pdb)
{
	gk20a_dbg_fn("");

	nvgpu_err(g, "call to RM server not supported");
}

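/*
 * Toggle MMU debug mode through the RM server
 * (TEGRA_VGPU_CMD_SET_MMU_DEBUG_MODE); the enable flag is passed as a u32.
 */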
void vgpu_mm_mmu_set_debug_mode(struct gk20a *g, bool enable)
{
	struct tegra_vgpu_cmd_msg msg;
	struct tegra_vgpu_mmu_debug_mode *p = &msg.params.mmu_debug_mode;
	int err;

	gk20a_dbg_fn("");

	msg.cmd = TEGRA_VGPU_CMD_SET_MMU_DEBUG_MODE;
	msg.handle = vgpu_get_handle(g);
	p->enable = (u32)enable;
	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
	WARN_ON(err || msg.ret);
}