path: root/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
author     Richard Zhao <rizhao@nvidia.com>    2018-01-30 02:24:37 -0500
committer  mobile promotions <svcmobile_promotions@nvidia.com>    2018-02-27 17:30:52 -0500
commit     6393eddfa996fba03464f897b85aa5ec79860fed (patch)
tree       557ebe9be93e2b0464118e7d8ec019d9d5dbae5f /drivers/gpu/nvgpu/vgpu/mm_vgpu.c
parent     7932568b7fe9e16b2b83bc58b2b3686c0d5e52d4 (diff)
gpu: nvgpu: vgpu: move common files out of linux folder
Most files have been moved out of the linux folder. More code can be made common as HAL-ification goes on.

Jira EVLR-2364
Change-Id: Ia9dbdbc82f45ceefe5c788eac7517000cd455d5e
Signed-off-by: Richard Zhao <rizhao@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1649947
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
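For orientation, the sketch below shows one way the vGPU MM entry points added by this change could be wired into the per-chip HAL during vGPU init, which is what the HAL-ification above refers to. It is a minimal illustration, not part of this commit; the gpu_ops member names used here are assumptions and may not match the real struct layout.

/*
 * Illustrative sketch only, not part of this commit. Assumes "gk20a/gk20a.h"
 * provides struct gpu_ops; the member names below are assumed, not verified
 * against the tree.
 */
static void example_vgpu_init_mm_ops(struct gpu_ops *gops)
{
	gops->mm.init_mm_support = vgpu_init_mm_support;
	gops->mm.gmmu_unmap = vgpu_locked_gmmu_unmap;
	gops->mm.vm_bind_channel = vgpu_vm_bind_channel;
	gops->mm.fb_flush = vgpu_mm_fb_flush;
	gops->mm.l2_invalidate = vgpu_mm_l2_invalidate;
	gops->mm.l2_flush = vgpu_mm_l2_flush;
	gops->mm.set_debug_mode = vgpu_mm_mmu_set_debug_mode;
}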
Diffstat (limited to 'drivers/gpu/nvgpu/vgpu/mm_vgpu.c')
-rw-r--r--  drivers/gpu/nvgpu/vgpu/mm_vgpu.c  278
1 file changed, 278 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
new file mode 100644
index 00000000..21496906
--- /dev/null
+++ b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
@@ -0,0 +1,278 @@
/*
 * Virtualized GPU Memory Management
 *
 * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <nvgpu/kmem.h>
#include <nvgpu/dma.h>
#include <nvgpu/bug.h>
#include <nvgpu/vm.h>
#include <nvgpu/vm_area.h>

#include <nvgpu/vgpu/vm.h>
#include <nvgpu/vgpu/vgpu.h>

#include <nvgpu/linux/vm.h>
#include <nvgpu/linux/nvgpu_mem.h>

#include "mm_vgpu.h"
#include "gk20a/gk20a.h"
#include "gk20a/mm_gk20a.h"
#include "gm20b/mm_gm20b.h"

static int vgpu_init_mm_setup_sw(struct gk20a *g)
{
	struct mm_gk20a *mm = &g->mm;

	gk20a_dbg_fn("");

	if (mm->sw_ready) {
		gk20a_dbg_fn("skip init");
		return 0;
	}

	nvgpu_mutex_init(&mm->tlb_lock);
	nvgpu_mutex_init(&mm->priv_lock);

	mm->g = g;

	/* TBD: make channel vm size configurable */
	mm->channel.user_size = NV_MM_DEFAULT_USER_SIZE;
	mm->channel.kernel_size = NV_MM_DEFAULT_KERNEL_SIZE;

	gk20a_dbg_info("channel vm size: user %dMB kernel %dMB",
		       (int)(mm->channel.user_size >> 20),
		       (int)(mm->channel.kernel_size >> 20));

	mm->sw_ready = true;

	return 0;
}

int vgpu_init_mm_support(struct gk20a *g)
{
	int err;

	gk20a_dbg_fn("");

	err = vgpu_init_mm_setup_sw(g);
	if (err)
		return err;

	if (g->ops.mm.init_mm_setup_hw)
		err = g->ops.mm.init_mm_setup_hw(g);

	return err;
}

void vgpu_locked_gmmu_unmap(struct vm_gk20a *vm,
			    u64 vaddr,
			    u64 size,
			    int pgsz_idx,
			    bool va_allocated,
			    int rw_flag,
			    bool sparse,
			    struct vm_gk20a_mapping_batch *batch)
{
	struct gk20a *g = gk20a_from_vm(vm);
	struct tegra_vgpu_cmd_msg msg;
	struct tegra_vgpu_as_map_params *p = &msg.params.as_map;
	int err;

	gk20a_dbg_fn("");

	if (va_allocated) {
		err = __nvgpu_vm_free_va(vm, vaddr, pgsz_idx);
		if (err) {
			nvgpu_err(g, "failed to free va");
			return;
		}
	}

	msg.cmd = TEGRA_VGPU_CMD_AS_UNMAP;
	msg.handle = vgpu_get_handle(g);
	p->handle = vm->handle;
	p->gpu_va = vaddr;
	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
	if (err || msg.ret)
		nvgpu_err(g, "failed to update gmmu ptes on unmap");

	/* TLB invalidate handled on server side */
}

/*
 * This is called by the common VM init routine to handle vGPU specifics of
 * initializing a VM on a vGPU. This alone is not enough to init a VM. See
 * nvgpu_vm_init().
 */
int vgpu_vm_init(struct gk20a *g, struct vm_gk20a *vm)
{
	struct tegra_vgpu_cmd_msg msg;
	struct tegra_vgpu_as_share_params *p = &msg.params.as_share;
	int err;

	msg.cmd = TEGRA_VGPU_CMD_AS_ALLOC_SHARE;
	msg.handle = vgpu_get_handle(g);
	p->size = vm->va_limit;
	p->big_page_size = vm->big_page_size;

	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
	if (err || msg.ret)
		return -ENOMEM;

	vm->handle = p->handle;

	return 0;
}

/*
 * Similar to vgpu_vm_init(), this is called as part of the cleanup path for
 * VMs. This alone is not enough to remove a VM - see nvgpu_vm_remove().
 */
void vgpu_vm_remove(struct vm_gk20a *vm)
{
	struct gk20a *g = gk20a_from_vm(vm);
	struct tegra_vgpu_cmd_msg msg;
	struct tegra_vgpu_as_share_params *p = &msg.params.as_share;
	int err;

	msg.cmd = TEGRA_VGPU_CMD_AS_FREE_SHARE;
	msg.handle = vgpu_get_handle(g);
	p->handle = vm->handle;
	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
	WARN_ON(err || msg.ret);
}

u64 vgpu_bar1_map(struct gk20a *g, struct nvgpu_mem *mem)
{
	u64 addr = nvgpu_mem_get_addr(g, mem);
	struct tegra_vgpu_cmd_msg msg;
	struct tegra_vgpu_as_map_params *p = &msg.params.as_map;
	int err;

	msg.cmd = TEGRA_VGPU_CMD_MAP_BAR1;
	msg.handle = vgpu_get_handle(g);
	p->addr = addr;
	p->size = mem->size;
	p->iova = 0;
	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
	if (err || msg.ret)
		addr = 0;
	else
		addr = p->gpu_va;

	return addr;
}

int vgpu_vm_bind_channel(struct gk20a_as_share *as_share,
			 struct channel_gk20a *ch)
{
	struct vm_gk20a *vm = as_share->vm;
	struct tegra_vgpu_cmd_msg msg;
	struct tegra_vgpu_as_bind_share_params *p = &msg.params.as_bind_share;
	int err;

	gk20a_dbg_fn("");

	ch->vm = vm;
	msg.cmd = TEGRA_VGPU_CMD_AS_BIND_SHARE;
	msg.handle = vgpu_get_handle(ch->g);
	p->as_handle = vm->handle;
	p->chan_handle = ch->virt_ctx;
	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));

	if (err || msg.ret) {
		ch->vm = NULL;
		err = -ENOMEM;
	}

	if (ch->vm)
		nvgpu_vm_get(ch->vm);

	return err;
}

static void vgpu_cache_maint(u64 handle, u8 op)
{
	struct tegra_vgpu_cmd_msg msg;
	struct tegra_vgpu_cache_maint_params *p = &msg.params.cache_maint;
	int err;

	msg.cmd = TEGRA_VGPU_CMD_CACHE_MAINT;
	msg.handle = handle;
	p->op = op;
	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
	WARN_ON(err || msg.ret);
}

int vgpu_mm_fb_flush(struct gk20a *g)
{

	gk20a_dbg_fn("");

	vgpu_cache_maint(vgpu_get_handle(g), TEGRA_VGPU_FB_FLUSH);
	return 0;
}

void vgpu_mm_l2_invalidate(struct gk20a *g)
{

	gk20a_dbg_fn("");

	vgpu_cache_maint(vgpu_get_handle(g), TEGRA_VGPU_L2_MAINT_INV);
}

void vgpu_mm_l2_flush(struct gk20a *g, bool invalidate)
{
	u8 op;

	gk20a_dbg_fn("");

	if (invalidate)
		op = TEGRA_VGPU_L2_MAINT_FLUSH_INV;
	else
		op = TEGRA_VGPU_L2_MAINT_FLUSH;

	vgpu_cache_maint(vgpu_get_handle(g), op);
}

void vgpu_mm_tlb_invalidate(struct gk20a *g, struct nvgpu_mem *pdb)
{
	gk20a_dbg_fn("");

	nvgpu_err(g, "call to RM server not supported");
}

void vgpu_mm_mmu_set_debug_mode(struct gk20a *g, bool enable)
{
	struct tegra_vgpu_cmd_msg msg;
	struct tegra_vgpu_mmu_debug_mode *p = &msg.params.mmu_debug_mode;
	int err;

	gk20a_dbg_fn("");

	msg.cmd = TEGRA_VGPU_CMD_SET_MMU_DEBUG_MODE;
	msg.handle = vgpu_get_handle(g);
	p->enable = (u32)enable;
	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
	WARN_ON(err || msg.ret);
}
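
Every call into the RM server in this file follows the same idiom: fill a struct tegra_vgpu_cmd_msg, send it with vgpu_comm_sendrecv(), and treat either a transport error (err) or a non-zero server status (msg.ret) as failure. A condensed sketch of that idiom follows; the helper name and the -EIO return value are illustrative assumptions, not taken from this commit.

/*
 * Hypothetical helper, not part of this commit: condenses the
 * fill/sendrecv/check idiom used by the functions above. The -EIO mapping
 * is an assumption; the real code returns 0 or -ENOMEM, or only warns.
 */
static int example_vgpu_simple_cmd(struct gk20a *g, u32 cmd)
{
	struct tegra_vgpu_cmd_msg msg = {};
	int err;

	msg.cmd = cmd;
	msg.handle = vgpu_get_handle(g);
	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));

	return (err || msg.ret) ? -EIO : 0;
}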