-rw-r--r--  drivers/gpu/nvgpu/common/as.c                63
-rw-r--r--  drivers/gpu/nvgpu/common/mm/vm.c             27
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gk20a.h               2
-rw-r--r--  drivers/gpu/nvgpu/gk20a/mm_gk20a.c           62
-rw-r--r--  drivers/gpu/nvgpu/gk20a/mm_gk20a.h            2
-rw-r--r--  drivers/gpu/nvgpu/gm20b/mm_gm20b.c            1
-rw-r--r--  drivers/gpu/nvgpu/include/nvgpu/vgpu/vm.h    25
-rw-r--r--  drivers/gpu/nvgpu/include/nvgpu/vm.h          5
-rw-r--r--  drivers/gpu/nvgpu/vgpu/mm_vgpu.c            197
9 files changed, 143 insertions, 241 deletions
diff --git a/drivers/gpu/nvgpu/common/as.c b/drivers/gpu/nvgpu/common/as.c
index 3182642a..481fb807 100644
--- a/drivers/gpu/nvgpu/common/as.c
+++ b/drivers/gpu/nvgpu/common/as.c
@@ -16,8 +16,10 @@
 #include <trace/events/gk20a.h>
 
 #include <nvgpu/kmem.h>
+#include <nvgpu/vm.h>
 
 #include "gk20a/gk20a.h"
+#include "gk20a/platform_gk20a.h"
 
 /* dumb allocator... */
 static int generate_as_share_id(struct gk20a_as *as)
@@ -32,6 +34,51 @@ static void release_as_share_id(struct gk20a_as *as, int id)
 	return;
 }
 
+/* address space interfaces for the gk20a module */
+static int gk20a_vm_alloc_share(struct gk20a_as_share *as_share,
+				u32 big_page_size, u32 flags)
+{
+	struct gk20a_as *as = as_share->as;
+	struct gk20a *g = gk20a_from_as(as);
+	struct mm_gk20a *mm = &g->mm;
+	struct vm_gk20a *vm;
+	char name[32];
+	int err;
+	const bool userspace_managed =
+		(flags & NVGPU_GPU_IOCTL_ALLOC_AS_FLAGS_USERSPACE_MANAGED) != 0;
+
+	gk20a_dbg_fn("");
+
+	if (big_page_size == 0) {
+		big_page_size =
+			gk20a_get_platform(g->dev)->default_big_page_size;
+	} else {
+		if (!is_power_of_2(big_page_size))
+			return -EINVAL;
+
+		if (!(big_page_size & g->gpu_characteristics.available_big_page_sizes))
+			return -EINVAL;
+	}
+
+	vm = nvgpu_kzalloc(g, sizeof(*vm));
+	if (!vm)
+		return -ENOMEM;
+
+	as_share->vm = vm;
+	vm->as_share = as_share;
+	vm->enable_ctag = true;
+
+	snprintf(name, sizeof(name), "as_%d", as_share->id);
+
+	err = nvgpu_init_vm(mm, vm, big_page_size,
+			    big_page_size << 10,
+			    mm->channel.kernel_size,
+			    mm->channel.user_size + mm->channel.kernel_size,
+			    !mm->disable_bigpage, userspace_managed, name);
+
+	return err;
+}
+
 int gk20a_as_alloc_share(struct gk20a *g,
 			 u32 big_page_size, u32 flags,
 			 struct gk20a_as_share **out)
@@ -56,7 +103,7 @@ int gk20a_as_alloc_share(struct gk20a *g,
 	err = gk20a_busy(g);
 	if (err)
 		goto failed;
-	err = g->ops.mm.vm_alloc_share(as_share, big_page_size, flags);
+	err = gk20a_vm_alloc_share(as_share, big_page_size, flags);
 	gk20a_idle(g);
 
 	if (err)
@@ -70,6 +117,20 @@ failed:
 	return err;
 }
 
+int gk20a_vm_release_share(struct gk20a_as_share *as_share)
+{
+	struct vm_gk20a *vm = as_share->vm;
+
+	gk20a_dbg_fn("");
+
+	vm->as_share = NULL;
+	as_share->vm = NULL;
+
+	nvgpu_vm_put(vm);
+
+	return 0;
+}
+
 /*
  * channels and the device nodes call this to release.
  * once the ref_cnt hits zero the share is deleted.
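
For reference, the validation rule the relocated gk20a_vm_alloc_share() applies to a caller-supplied big page size can be read in isolation: a non-zero request must be a power of two and must be one of the sizes advertised in gpu_characteristics.available_big_page_sizes (a bitmask of supported sizes); a zero request falls back to the platform default. A minimal standalone sketch of that check, outside the driver (the helper name and plain C types are illustrative, not part of this patch):

#include <stdbool.h>
#include <stdint.h>

/* Mirrors the big_page_size checks in gk20a_vm_alloc_share() above. */
static bool big_page_size_valid(uint32_t requested, uint32_t available_mask)
{
	if (requested == 0)
		return true;			/* platform default is substituted, unvalidated */
	if (requested & (requested - 1))
		return false;			/* must be a power of two */
	return (requested & available_mask) != 0; /* must be an advertised size */
}
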
diff --git a/drivers/gpu/nvgpu/common/mm/vm.c b/drivers/gpu/nvgpu/common/mm/vm.c
index b957e755..171a67ca 100644
--- a/drivers/gpu/nvgpu/common/mm/vm.c
+++ b/drivers/gpu/nvgpu/common/mm/vm.c
@@ -24,6 +24,8 @@
 #include <nvgpu/semaphore.h>
 #include <nvgpu/enabled.h>
 
+#include <nvgpu/vgpu/vm.h>
+
 #include "gk20a/gk20a.h"
 #include "gk20a/mm_gk20a.h"
 
@@ -209,10 +211,11 @@ static int nvgpu_init_sema_pool(struct vm_gk20a *vm)
  * @vm - The VM to init.
  * @big_page_size - Size of big pages associated with this VM.
  * @low_hole - The size of the low hole (unaddressable memory at the bottom of
- *             the address space.
+ *             the address space).
  * @kernel_reserved - Space reserved for kernel only allocations.
  * @aperture_size - Total size of the aperture.
- * @big_pages - Ignored. Will be set based on other passed params.
+ * @big_pages - If true then big pages are possible in the VM. Note this does
+ *              not guarantee that big pages will be possible.
  * @name - Name of the address space.
  *
  * This function initializes an address space according to the following map:
@@ -284,10 +287,21 @@ int nvgpu_init_vm(struct mm_gk20a *mm,
 	vm->userspace_managed = userspace_managed;
 	vm->mmu_levels = g->ops.mm.get_mmu_levels(g, vm->big_page_size);
 
+#ifdef CONFIG_TEGRA_GR_VIRTUALIZATION
+	if (g->is_virtual && userspace_managed) {
+		nvgpu_err(g, "vGPU: no userspace managed addr space support");
+		return -ENOSYS;
+	}
+	if (g->is_virtual && vgpu_vm_init(g, vm)) {
+		nvgpu_err(g, "Failed to init vGPU VM!");
+		return -ENOMEM;
+	}
+#endif
+
 	/* Initialize the page table data structures. */
 	err = nvgpu_vm_init_page_tables(vm);
 	if (err)
-		return err;
+		goto clean_up_vgpu_vm;
 
 	/* Setup vma limits. */
 	if (kernel_reserved + low_hole < aperture_size) {
@@ -445,6 +459,11 @@ clean_up_page_tables:
 	/* Cleans up nvgpu_vm_init_page_tables() */
 	nvgpu_vfree(g, vm->pdb.entries);
 	free_gmmu_pages(vm, &vm->pdb);
+clean_up_vgpu_vm:
+#ifdef CONFIG_TEGRA_GR_VIRTUALIZATION
+	if (g->is_virtual)
+		vgpu_vm_remove(vm);
+#endif
 	return err;
 }
 
450 469
@@ -503,7 +522,7 @@ void __nvgpu_vm_remove(struct vm_gk20a *vm)
 
 #ifdef CONFIG_TEGRA_GR_VIRTUALIZATION
 	if (g->is_virtual)
-		nvgpu_vm_remove_vgpu(vm);
+		vgpu_vm_remove(vm);
 #endif
 
 	nvgpu_mutex_release(&vm->update_gmmu_lock);
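
Taken together, the vm.c hunks above order the vGPU handshake before local page-table setup and make the first error path unwind it. A condensed sketch of the control flow nvgpu_init_vm() now follows on a virtualized build (the function name and the elided parts are illustrative; the calls themselves are the ones shown in the hunks):

/* Sketch only; not the literal driver function. */
static int init_vm_flow(struct gk20a *g, struct vm_gk20a *vm,
			bool userspace_managed)
{
	int err;

#ifdef CONFIG_TEGRA_GR_VIRTUALIZATION
	if (g->is_virtual && userspace_managed)
		return -ENOSYS;		/* vGPU: no userspace-managed VMs */
	if (g->is_virtual && vgpu_vm_init(g, vm))
		return -ENOMEM;		/* server-side AS share comes first */
#endif

	err = nvgpu_vm_init_page_tables(vm);	/* local page tables second */
	if (err)
		goto clean_up_vgpu_vm;

	/* ... VMA limits, allocators, semaphore pool ... */
	return 0;

clean_up_vgpu_vm:
#ifdef CONFIG_TEGRA_GR_VIRTUALIZATION
	if (g->is_virtual)
		vgpu_vm_remove(vm);	/* undo TEGRA_VGPU_CMD_AS_ALLOC_SHARE */
#endif
	return err;
}
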
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.h b/drivers/gpu/nvgpu/gk20a/gk20a.h
index 4fc626e8..a02215d2 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.h
@@ -672,8 +672,6 @@ struct gpu_ops {
 			int rw_flag,
 			bool sparse,
 			struct vm_gk20a_mapping_batch *batch);
-	int (*vm_alloc_share)(struct gk20a_as_share *as_share,
-			      u32 big_page_size, u32 flags);
 	int (*vm_bind_channel)(struct gk20a_as_share *as_share,
 			       struct channel_gk20a *ch);
 	int (*fb_flush)(struct gk20a *g);
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index ec020d5f..87e6f30c 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -702,8 +702,6 @@ int gk20a_init_mm_setup_sw(struct gk20a *g)
 	if (err)
 		return err;
 
-	/* set vm_alloc_share op here as gk20a_as_alloc_share needs it */
-	g->ops.mm.vm_alloc_share = gk20a_vm_alloc_share;
 	mm->remove_support = gk20a_remove_mm_support;
 	mm->remove_ce_support = gk20a_remove_mm_ce_support;
 
@@ -2451,65 +2449,6 @@ enum gmmu_pgsz_gk20a __get_pte_size(struct vm_gk20a *vm, u64 base, u64 size)
 	return gmmu_page_size_small;
 }
 
-/* address space interfaces for the gk20a module */
-int gk20a_vm_alloc_share(struct gk20a_as_share *as_share, u32 big_page_size,
-			 u32 flags)
-{
-	struct gk20a_as *as = as_share->as;
-	struct gk20a *g = gk20a_from_as(as);
-	struct mm_gk20a *mm = &g->mm;
-	struct vm_gk20a *vm;
-	char name[32];
-	int err;
-	const bool userspace_managed =
-		(flags & NVGPU_GPU_IOCTL_ALLOC_AS_FLAGS_USERSPACE_MANAGED) != 0;
-
-	gk20a_dbg_fn("");
-
-	if (big_page_size == 0) {
-		big_page_size =
-			gk20a_get_platform(g->dev)->default_big_page_size;
-	} else {
-		if (!is_power_of_2(big_page_size))
-			return -EINVAL;
-
-		if (!(big_page_size & g->gpu_characteristics.available_big_page_sizes))
-			return -EINVAL;
-	}
-
-	vm = nvgpu_kzalloc(g, sizeof(*vm));
-	if (!vm)
-		return -ENOMEM;
-
-	as_share->vm = vm;
-	vm->as_share = as_share;
-	vm->enable_ctag = true;
-
-	snprintf(name, sizeof(name), "as_%d", as_share->id);
-
-	err = nvgpu_init_vm(mm, vm, big_page_size,
-			    big_page_size << 10,
-			    mm->channel.kernel_size,
-			    mm->channel.user_size + mm->channel.kernel_size,
-			    !mm->disable_bigpage, userspace_managed, name);
-
-	return err;
-}
-
-int gk20a_vm_release_share(struct gk20a_as_share *as_share)
-{
-	struct vm_gk20a *vm = as_share->vm;
-
-	gk20a_dbg_fn("");
-
-	vm->as_share = NULL;
-	as_share->vm = NULL;
-
-	nvgpu_vm_put(vm);
-
-	return 0;
-}
-
 int __gk20a_vm_bind_channel(struct vm_gk20a *vm, struct channel_gk20a *ch)
 {
 	int err = 0;
@@ -3130,7 +3069,6 @@ void gk20a_init_mm(struct gpu_ops *gops)
 {
 	gops->mm.gmmu_map = gk20a_locked_gmmu_map;
 	gops->mm.gmmu_unmap = gk20a_locked_gmmu_unmap;
-	gops->mm.vm_alloc_share = gk20a_vm_alloc_share;
 	gops->mm.vm_bind_channel = gk20a_vm_bind_channel;
 	gops->mm.fb_flush = gk20a_mm_fb_flush;
 	gops->mm.l2_invalidate = gk20a_mm_l2_invalidate;
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
index 94342818..16c35d34 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
@@ -416,8 +416,6 @@ int nvgpu_vm_get_compbits_info(struct vm_gk20a *vm,
 /* vm-as interface */
 struct nvgpu_as_alloc_space_args;
 struct nvgpu_as_free_space_args;
-int gk20a_vm_alloc_share(struct gk20a_as_share *as_share, u32 big_page_size,
-			 u32 flags);
 int gk20a_vm_release_share(struct gk20a_as_share *as_share);
 int gk20a_vm_bind_channel(struct gk20a_as_share *as_share,
 			  struct channel_gk20a *ch);
diff --git a/drivers/gpu/nvgpu/gm20b/mm_gm20b.c b/drivers/gpu/nvgpu/gm20b/mm_gm20b.c
index 78e083d0..0595fe2e 100644
--- a/drivers/gpu/nvgpu/gm20b/mm_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/mm_gm20b.c
@@ -60,7 +60,6 @@ void gm20b_init_mm(struct gpu_ops *gops)
 	gops->mm.support_sparse = gm20b_mm_support_sparse;
 	gops->mm.gmmu_map = gk20a_locked_gmmu_map;
 	gops->mm.gmmu_unmap = gk20a_locked_gmmu_unmap;
-	gops->mm.vm_alloc_share = gk20a_vm_alloc_share;
 	gops->mm.vm_bind_channel = gk20a_vm_bind_channel;
 	gops->mm.fb_flush = gk20a_mm_fb_flush;
 	gops->mm.l2_invalidate = gk20a_mm_l2_invalidate;
diff --git a/drivers/gpu/nvgpu/include/nvgpu/vgpu/vm.h b/drivers/gpu/nvgpu/include/nvgpu/vgpu/vm.h
new file mode 100644
index 00000000..364baac6
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/vgpu/vm.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __NVGPU_VM_VGPU_H__
+#define __NVGPU_VM_VGPU_H__
+
+#ifdef CONFIG_TEGRA_GR_VIRTUALIZATION
+int vgpu_vm_init(struct gk20a *g, struct vm_gk20a *vm);
+void vgpu_vm_remove(struct vm_gk20a *vm);
+#endif
+
+#endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/vm.h b/drivers/gpu/nvgpu/include/nvgpu/vm.h
index fed58f24..403f3b18 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/vm.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/vm.h
@@ -225,12 +225,10 @@ int nvgpu_insert_mapped_buf(struct vm_gk20a *vm,
 void nvgpu_remove_mapped_buf(struct vm_gk20a *vm,
 			     struct nvgpu_mapped_buf *mapped_buffer);
 
+void nvgpu_deinit_vm(struct vm_gk20a *vm);
 void __nvgpu_vm_remove(struct vm_gk20a *vm);
 void nvgpu_vm_remove(struct vm_gk20a *vm);
 void nvgpu_vm_remove_inst(struct vm_gk20a *vm, struct nvgpu_mem *inst_block);
-#ifdef CONFIG_TEGRA_GR_VIRTUALIZATION
-void nvgpu_vm_remove_vgpu(struct vm_gk20a *vm);
-#endif
 
 int nvgpu_init_vm(struct mm_gk20a *mm,
 		  struct vm_gk20a *vm,
@@ -241,7 +239,6 @@ int nvgpu_init_vm(struct mm_gk20a *mm,
 		  bool big_pages,
 		  bool userspace_managed,
 		  char *name);
-void nvgpu_deinit_vm(struct vm_gk20a *vm);
 
 /*
  * These are private to the VM code but are unfortunately used by the vgpu code.
diff --git a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
index 287567d6..b2bc6f0a 100644
--- a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
@@ -21,6 +21,8 @@
 #include <nvgpu/vm.h>
 #include <nvgpu/vm_area.h>
 
+#include <nvgpu/vgpu/vm.h>
+
 #include "vgpu/vgpu.h"
 #include "gk20a/mm_gk20a.h"
 
@@ -201,7 +203,36 @@ static void vgpu_locked_gmmu_unmap(struct vm_gk20a *vm,
 	/* TLB invalidate handled on server side */
 }
 
-void nvgpu_vm_remove_vgpu(struct vm_gk20a *vm)
+/*
+ * This is called by the common VM init routine to handle vGPU specifics of
+ * initializing a VM on a vGPU. This alone is not enough to init a VM. See
+ * nvgpu_init_vm().
+ */
+int vgpu_vm_init(struct gk20a *g, struct vm_gk20a *vm)
+{
+	struct tegra_vgpu_cmd_msg msg;
+	struct tegra_vgpu_as_share_params *p = &msg.params.as_share;
+	int err;
+
+	msg.cmd = TEGRA_VGPU_CMD_AS_ALLOC_SHARE;
+	msg.handle = vgpu_get_handle(g);
+	p->size = vm->va_limit;
+	p->big_page_size = vm->big_page_size;
+
+	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
+	if (err || msg.ret)
+		return -ENOMEM;
+
+	vm->handle = p->handle;
+
+	return 0;
+}
+
+/*
+ * Similar to vgpu_vm_init(), this is called as part of the cleanup path for
+ * VMs. This alone is not enough to remove a VM - see nvgpu_vm_remove().
+ */
+void vgpu_vm_remove(struct vm_gk20a *vm)
 {
 	struct gk20a *g = gk20a_from_vm(vm);
 	struct tegra_vgpu_cmd_msg msg;
@@ -238,169 +269,6 @@ u64 vgpu_bar1_map(struct gk20a *g, struct sg_table **sgt, u64 size)
 	return addr;
 }
 
-/* address space interfaces for the gk20a module */
-static int vgpu_vm_alloc_share(struct gk20a_as_share *as_share,
-			       u32 big_page_size, u32 flags)
-{
-	struct gk20a_as *as = as_share->as;
-	struct gk20a *g = gk20a_from_as(as);
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
-	struct tegra_vgpu_cmd_msg msg;
-	struct tegra_vgpu_as_share_params *p = &msg.params.as_share;
-	struct mm_gk20a *mm = &g->mm;
-	struct vm_gk20a *vm;
-	u64 user_vma_start, user_vma_limit, kernel_vma_start, kernel_vma_limit;
-	char name[32];
-	int err, i;
-	const bool userspace_managed =
-		(flags & NVGPU_GPU_IOCTL_ALLOC_AS_FLAGS_USERSPACE_MANAGED) != 0;
-
-	/* note: keep the page sizes sorted lowest to highest here */
-	u32 gmmu_page_sizes[gmmu_nr_page_sizes] = {
-		SZ_4K,
-		big_page_size ? big_page_size : platform->default_big_page_size,
-		SZ_4K
-	};
-
-	gk20a_dbg_fn("");
-
-	if (userspace_managed) {
-		nvgpu_err(g,
-			  "userspace-managed address spaces not yet supported");
-		return -ENOSYS;
-	}
-
-	big_page_size = gmmu_page_sizes[gmmu_page_size_big];
-
-	vm = nvgpu_kzalloc(g, sizeof(*vm));
-	if (!vm)
-		return -ENOMEM;
-
-	as_share->vm = vm;
-
-	vm->mm = mm;
-	vm->as_share = as_share;
-
-	/* Set up vma pointers. */
-	vm->vma[0] = &vm->user;
-	vm->vma[1] = &vm->user;
-	vm->vma[2] = &vm->kernel;
-
-	for (i = 0; i < gmmu_nr_page_sizes; i++)
-		vm->gmmu_page_sizes[i] = gmmu_page_sizes[i];
-
-	vm->big_pages = !mm->disable_bigpage;
-	vm->big_page_size = big_page_size;
-
-	vm->va_start = big_page_size << 10;	/* create a one pde hole */
-	vm->va_limit = mm->channel.user_size + mm->channel.kernel_size;
-
-	msg.cmd = TEGRA_VGPU_CMD_AS_ALLOC_SHARE;
-	msg.handle = vgpu_get_handle(g);
-	p->size = vm->va_limit;
-	p->big_page_size = vm->big_page_size;
-	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
-	if (err || msg.ret) {
-		err = -ENOMEM;
-		goto clean_up;
-	}
-
-	vm->handle = p->handle;
-
-	/* setup vma limits */
-	user_vma_start = vm->va_start;
-	user_vma_limit = vm->va_limit - mm->channel.kernel_size;
-
-	kernel_vma_start = vm->va_limit - mm->channel.kernel_size;
-	kernel_vma_limit = vm->va_limit;
-
-	gk20a_dbg_info(
-		"user_vma=[0x%llx,0x%llx) kernel_vma=[0x%llx,0x%llx)\n",
-		user_vma_start, user_vma_limit,
-		kernel_vma_start, kernel_vma_limit);
-
-	WARN_ON(user_vma_start > user_vma_limit);
-	WARN_ON(kernel_vma_start >= kernel_vma_limit);
-
-	if (user_vma_start > user_vma_limit ||
-	    kernel_vma_start >= kernel_vma_limit) {
-		err = -EINVAL;
-		goto clean_up_share;
-	}
-
-	if (user_vma_start < user_vma_limit) {
-		snprintf(name, sizeof(name), "gk20a_as_%d-%dKB", as_share->id,
-			 gmmu_page_sizes[gmmu_page_size_small] >> 10);
-		if (!nvgpu_big_pages_possible(vm, user_vma_start,
-					      user_vma_limit - user_vma_start))
-			vm->big_pages = false;
-
-		err = __nvgpu_buddy_allocator_init(
-			g,
-			vm->vma[gmmu_page_size_small],
-			vm, name,
-			user_vma_start,
-			user_vma_limit - user_vma_start,
-			SZ_4K,
-			GPU_BALLOC_MAX_ORDER,
-			GPU_ALLOC_GVA_SPACE);
-		if (err)
-			goto clean_up_share;
-	} else {
-		/*
-		 * Make these allocator pointers point to the kernel allocator
-		 * since we still use the legacy notion of page size to choose
-		 * the allocator.
-		 */
-		vm->vma[0] = &vm->kernel;
-		vm->vma[1] = &vm->kernel;
-	}
-
-	snprintf(name, sizeof(name), "gk20a_as_%dKB-sys",
-		 gmmu_page_sizes[gmmu_page_size_kernel] >> 10);
-	if (!nvgpu_big_pages_possible(vm, kernel_vma_start,
-				      kernel_vma_limit - kernel_vma_start))
-		vm->big_pages = false;
-
-	/*
-	 * kernel reserved VMA is at the end of the aperture
-	 */
-	err = __nvgpu_buddy_allocator_init(
-		g,
-		vm->vma[gmmu_page_size_kernel],
-		vm, name,
-		kernel_vma_start,
-		kernel_vma_limit - kernel_vma_start,
-		SZ_4K,
-		GPU_BALLOC_MAX_ORDER,
-		GPU_ALLOC_GVA_SPACE);
-	if (err)
-		goto clean_up_user_allocator;
-
-	vm->mapped_buffers = NULL;
-
-	nvgpu_mutex_init(&vm->update_gmmu_lock);
-	kref_init(&vm->ref);
-	nvgpu_init_list_node(&vm->vm_area_list);
-
-	vm->enable_ctag = true;
-
-	return 0;
-
-clean_up_user_allocator:
-	if (user_vma_start < user_vma_limit)
-		nvgpu_alloc_destroy(&vm->user);
-clean_up_share:
-	msg.cmd = TEGRA_VGPU_CMD_AS_FREE_SHARE;
-	msg.handle = vgpu_get_handle(g);
-	p->handle = vm->handle;
-	WARN_ON(vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)) || msg.ret);
-clean_up:
-	nvgpu_kfree(g, vm);
-	as_share->vm = NULL;
-	return err;
-}
-
 static int vgpu_vm_bind_channel(struct gk20a_as_share *as_share,
 				struct channel_gk20a *ch)
 {
@@ -501,7 +369,6 @@ void vgpu_init_mm_ops(struct gpu_ops *gops)
 	gops->fb.set_debug_mode = vgpu_mm_mmu_set_debug_mode;
 	gops->mm.gmmu_map = vgpu_locked_gmmu_map;
 	gops->mm.gmmu_unmap = vgpu_locked_gmmu_unmap;
-	gops->mm.vm_alloc_share = vgpu_vm_alloc_share;
 	gops->mm.vm_bind_channel = vgpu_vm_bind_channel;
 	gops->mm.fb_flush = vgpu_mm_fb_flush;
 	gops->mm.l2_invalidate = vgpu_mm_l2_invalidate;
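
Both vgpu_vm_init() and vgpu_vm_remove() use the same request/response pattern against the RM server: fill a tegra_vgpu_cmd_msg, round-trip it with vgpu_comm_sendrecv(), and treat either a transport error or a non-zero msg.ret as failure. A sketch of that pattern factored into one helper (the helper name is an assumption for illustration; the message fields are the ones used in the functions above):

static int vgpu_as_share_cmd(struct gk20a *g, u32 cmd,
			     struct tegra_vgpu_as_share_params *params)
{
	struct tegra_vgpu_cmd_msg msg;
	int err;

	msg.cmd = cmd;				/* e.g. TEGRA_VGPU_CMD_AS_ALLOC_SHARE */
	msg.handle = vgpu_get_handle(g);
	msg.params.as_share = *params;

	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
	if (err || msg.ret)
		return -ENOMEM;			/* either failure mode maps to -ENOMEM */

	*params = msg.params.as_share;		/* returns e.g. the allocated handle */
	return 0;
}
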