summary | refs | log | tree | commit | diff | stats
path: root/drivers/gpu/nvgpu/common
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/nvgpu/common')
-rw-r--r--drivers/gpu/nvgpu/common/as.c63
-rw-r--r--drivers/gpu/nvgpu/common/mm/vm.c27
2 files changed, 85 insertions, 5 deletions
diff --git a/drivers/gpu/nvgpu/common/as.c b/drivers/gpu/nvgpu/common/as.c
index 3182642a..481fb807 100644
--- a/drivers/gpu/nvgpu/common/as.c
+++ b/drivers/gpu/nvgpu/common/as.c
@@ -16,8 +16,10 @@
16#include <trace/events/gk20a.h> 16#include <trace/events/gk20a.h>
17 17
18#include <nvgpu/kmem.h> 18#include <nvgpu/kmem.h>
19#include <nvgpu/vm.h>
19 20
20#include "gk20a/gk20a.h" 21#include "gk20a/gk20a.h"
22#include "gk20a/platform_gk20a.h"
21 23
22/* dumb allocator... */ 24/* dumb allocator... */
23static int generate_as_share_id(struct gk20a_as *as) 25static int generate_as_share_id(struct gk20a_as *as)
@@ -32,6 +34,51 @@ static void release_as_share_id(struct gk20a_as *as, int id)
32 return; 34 return;
33} 35}
34 36
37/* address space interfaces for the gk20a module */
38static int gk20a_vm_alloc_share(struct gk20a_as_share *as_share,
39 u32 big_page_size, u32 flags)
40{
41 struct gk20a_as *as = as_share->as;
42 struct gk20a *g = gk20a_from_as(as);
43 struct mm_gk20a *mm = &g->mm;
44 struct vm_gk20a *vm;
45 char name[32];
46 int err;
47 const bool userspace_managed =
48 (flags & NVGPU_GPU_IOCTL_ALLOC_AS_FLAGS_USERSPACE_MANAGED) != 0;
49
50 gk20a_dbg_fn("");
51
52 if (big_page_size == 0) {
53 big_page_size =
54 gk20a_get_platform(g->dev)->default_big_page_size;
55 } else {
56 if (!is_power_of_2(big_page_size))
57 return -EINVAL;
58
59 if (!(big_page_size & g->gpu_characteristics.available_big_page_sizes))
60 return -EINVAL;
61 }
62
63 vm = nvgpu_kzalloc(g, sizeof(*vm));
64 if (!vm)
65 return -ENOMEM;
66
67 as_share->vm = vm;
68 vm->as_share = as_share;
69 vm->enable_ctag = true;
70
71 snprintf(name, sizeof(name), "as_%d", as_share->id);
72
73 err = nvgpu_init_vm(mm, vm, big_page_size,
74 big_page_size << 10,
75 mm->channel.kernel_size,
76 mm->channel.user_size + mm->channel.kernel_size,
77 !mm->disable_bigpage, userspace_managed, name);
78
79 return err;
80}
81
35int gk20a_as_alloc_share(struct gk20a *g, 82int gk20a_as_alloc_share(struct gk20a *g,
36 u32 big_page_size, u32 flags, 83 u32 big_page_size, u32 flags,
37 struct gk20a_as_share **out) 84 struct gk20a_as_share **out)
@@ -56,7 +103,7 @@ int gk20a_as_alloc_share(struct gk20a *g,
56 err = gk20a_busy(g); 103 err = gk20a_busy(g);
57 if (err) 104 if (err)
58 goto failed; 105 goto failed;
59 err = g->ops.mm.vm_alloc_share(as_share, big_page_size, flags); 106 err = gk20a_vm_alloc_share(as_share, big_page_size, flags);
60 gk20a_idle(g); 107 gk20a_idle(g);
61 108
62 if (err) 109 if (err)
@@ -70,6 +117,20 @@ failed:
70 return err; 117 return err;
71} 118}
72 119
120int gk20a_vm_release_share(struct gk20a_as_share *as_share)
121{
122 struct vm_gk20a *vm = as_share->vm;
123
124 gk20a_dbg_fn("");
125
126 vm->as_share = NULL;
127 as_share->vm = NULL;
128
129 nvgpu_vm_put(vm);
130
131 return 0;
132}
133
73/* 134/*
74 * channels and the device nodes call this to release. 135 * channels and the device nodes call this to release.
75 * once the ref_cnt hits zero the share is deleted. 136 * once the ref_cnt hits zero the share is deleted.
diff --git a/drivers/gpu/nvgpu/common/mm/vm.c b/drivers/gpu/nvgpu/common/mm/vm.c
index b957e755..171a67ca 100644
--- a/drivers/gpu/nvgpu/common/mm/vm.c
+++ b/drivers/gpu/nvgpu/common/mm/vm.c
@@ -24,6 +24,8 @@
24#include <nvgpu/semaphore.h> 24#include <nvgpu/semaphore.h>
25#include <nvgpu/enabled.h> 25#include <nvgpu/enabled.h>
26 26
27#include <nvgpu/vgpu/vm.h>
28
27#include "gk20a/gk20a.h" 29#include "gk20a/gk20a.h"
28#include "gk20a/mm_gk20a.h" 30#include "gk20a/mm_gk20a.h"
29 31
@@ -209,10 +211,11 @@ static int nvgpu_init_sema_pool(struct vm_gk20a *vm)
209 * @vm - The VM to init. 211 * @vm - The VM to init.
210 * @big_page_size - Size of big pages associated with this VM. 212 * @big_page_size - Size of big pages associated with this VM.
211 * @low_hole - The size of the low hole (unaddressable memory at the bottom of 213 * @low_hole - The size of the low hole (unaddressable memory at the bottom of
212 * the address space. 214 * the address space).
213 * @kernel_reserved - Space reserved for kernel only allocations. 215 * @kernel_reserved - Space reserved for kernel only allocations.
214 * @aperture_size - Total size of the aperture. 216 * @aperture_size - Total size of the aperture.
215 * @big_pages - Ignored. Will be set based on other passed params. 217 * @big_pages - If true then big pages are possible in the VM. Note this does
218 * not guarantee that big pages will be possible.
216 * @name - Name of the address space. 219 * @name - Name of the address space.
217 * 220 *
218 * This function initializes an address space according to the following map: 221 * This function initializes an address space according to the following map:
@@ -284,10 +287,21 @@ int nvgpu_init_vm(struct mm_gk20a *mm,
284 vm->userspace_managed = userspace_managed; 287 vm->userspace_managed = userspace_managed;
285 vm->mmu_levels = g->ops.mm.get_mmu_levels(g, vm->big_page_size); 288 vm->mmu_levels = g->ops.mm.get_mmu_levels(g, vm->big_page_size);
286 289
290#ifdef CONFIG_TEGRA_GR_VIRTUALIZATION
291 if (g->is_virtual && userspace_managed) {
292 nvgpu_err(g, "vGPU: no userspace managed addr space support");
293 return -ENOSYS;
294 }
295 if (g->is_virtual && vgpu_vm_init(g, vm)) {
296 nvgpu_err(g, "Failed to init vGPU VM!");
297 return -ENOMEM;
298 }
299#endif
300
287 /* Initialize the page table data structures. */ 301 /* Initialize the page table data structures. */
288 err = nvgpu_vm_init_page_tables(vm); 302 err = nvgpu_vm_init_page_tables(vm);
289 if (err) 303 if (err)
290 return err; 304 goto clean_up_vgpu_vm;
291 305
292 /* Setup vma limits. */ 306 /* Setup vma limits. */
293 if (kernel_reserved + low_hole < aperture_size) { 307 if (kernel_reserved + low_hole < aperture_size) {
@@ -445,6 +459,11 @@ clean_up_page_tables:
445 /* Cleans up nvgpu_vm_init_page_tables() */ 459 /* Cleans up nvgpu_vm_init_page_tables() */
446 nvgpu_vfree(g, vm->pdb.entries); 460 nvgpu_vfree(g, vm->pdb.entries);
447 free_gmmu_pages(vm, &vm->pdb); 461 free_gmmu_pages(vm, &vm->pdb);
462clean_up_vgpu_vm:
463#ifdef CONFIG_TEGRA_GR_VIRTUALIZATION
464 if (g->is_virtual)
465 vgpu_vm_remove(vm);
466#endif
448 return err; 467 return err;
449} 468}
450 469
@@ -503,7 +522,7 @@ void __nvgpu_vm_remove(struct vm_gk20a *vm)
503 522
504#ifdef CONFIG_TEGRA_GR_VIRTUALIZATION 523#ifdef CONFIG_TEGRA_GR_VIRTUALIZATION
505 if (g->is_virtual) 524 if (g->is_virtual)
506 nvgpu_vm_remove_vgpu(vm); 525 vgpu_vm_remove(vm);
507#endif 526#endif
508 527
509 nvgpu_mutex_release(&vm->update_gmmu_lock); 528 nvgpu_mutex_release(&vm->update_gmmu_lock);