Diffstat (limited to 'drivers/gpu/nvgpu/common')
-rw-r--r--  drivers/gpu/nvgpu/common/as.c    |  19
-rw-r--r--  drivers/gpu/nvgpu/common/mm/vm.c | 158
2 files changed, 87 insertions, 90 deletions
diff --git a/drivers/gpu/nvgpu/common/as.c b/drivers/gpu/nvgpu/common/as.c
index 481fb807..99d18195 100644
--- a/drivers/gpu/nvgpu/common/as.c
+++ b/drivers/gpu/nvgpu/common/as.c
@@ -43,7 +43,6 @@ static int gk20a_vm_alloc_share(struct gk20a_as_share *as_share,
         struct mm_gk20a *mm = &g->mm;
         struct vm_gk20a *vm;
         char name[32];
-        int err;
         const bool userspace_managed =
                 (flags & NVGPU_GPU_IOCTL_ALLOC_AS_FLAGS_USERSPACE_MANAGED) != 0;
 
@@ -60,7 +59,13 @@ static int gk20a_vm_alloc_share(struct gk20a_as_share *as_share,
                 return -EINVAL;
         }
 
-        vm = nvgpu_kzalloc(g, sizeof(*vm));
+        snprintf(name, sizeof(name), "as_%d", as_share->id);
+
+        vm = nvgpu_vm_init(g, big_page_size,
+                           big_page_size << 10,
+                           mm->channel.kernel_size,
+                           mm->channel.user_size + mm->channel.kernel_size,
+                           !mm->disable_bigpage, userspace_managed, name);
         if (!vm)
                 return -ENOMEM;
 
@@ -68,15 +73,7 @@ static int gk20a_vm_alloc_share(struct gk20a_as_share *as_share,
         vm->as_share = as_share;
         vm->enable_ctag = true;
 
-        snprintf(name, sizeof(name), "as_%d", as_share->id);
-
-        err = nvgpu_init_vm(mm, vm, big_page_size,
-                            big_page_size << 10,
-                            mm->channel.kernel_size,
-                            mm->channel.user_size + mm->channel.kernel_size,
-                            !mm->disable_bigpage, userspace_managed, name);
-
-        return err;
+        return 0;
 }
 
 int gk20a_as_alloc_share(struct gk20a *g,
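
The net effect of the as.c hunks is that the caller no longer allocates the VM itself; allocation and initialization collapse into a single call that reports failure as NULL rather than an errno. A minimal caller-side sketch of the old and new patterns, using only the signatures visible in this diff (surrounding ioctl plumbing elided):

        /* Old pattern: the caller owned the allocation and had to free it on
         * a failed init (the removed code returned err without freeing). */
        vm = nvgpu_kzalloc(g, sizeof(*vm));
        if (!vm)
                return -ENOMEM;
        err = nvgpu_init_vm(mm, vm, big_page_size, big_page_size << 10,
                            mm->channel.kernel_size,
                            mm->channel.user_size + mm->channel.kernel_size,
                            !mm->disable_bigpage, userspace_managed, name);
        if (err)
                return err;

        /* New pattern: one call; allocation and init failure both map to NULL. */
        vm = nvgpu_vm_init(g, big_page_size, big_page_size << 10,
                           mm->channel.kernel_size,
                           mm->channel.user_size + mm->channel.kernel_size,
                           !mm->disable_bigpage, userspace_managed, name);
        if (!vm)
                return -ENOMEM;
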
diff --git a/drivers/gpu/nvgpu/common/mm/vm.c b/drivers/gpu/nvgpu/common/mm/vm.c
index 171a67ca..e24d40bf 100644
--- a/drivers/gpu/nvgpu/common/mm/vm.c
+++ b/drivers/gpu/nvgpu/common/mm/vm.c
@@ -204,52 +204,15 @@ static int nvgpu_init_sema_pool(struct vm_gk20a *vm)
         return 0;
 }
 
-/**
- * nvgpu_init_vm() - Initialize an address space.
- *
- * @mm - Parent MM.
- * @vm - The VM to init.
- * @big_page_size - Size of big pages associated with this VM.
- * @low_hole - The size of the low hole (unaddressable memory at the bottom of
- *             the address space).
- * @kernel_reserved - Space reserved for kernel only allocations.
- * @aperture_size - Total size of the aperture.
- * @big_pages - If true then big pages are possible in the VM. Note this does
- *              not guarantee that big pages will be possible.
- * @name - Name of the address space.
- *
- * This function initializes an address space according to the following map:
- *
- *     +--+ 0x0
- *     |  |
- *     +--+ @low_hole
- *     |  |
- *     ~  ~   This is the "user" section.
- *     |  |
- *     +--+ @aperture_size - @kernel_reserved
- *     |  |
- *     ~  ~   This is the "kernel" section.
- *     |  |
- *     +--+ @aperture_size
- *
- * The user section is therefor what ever is left over after the @low_hole and
- * @kernel_reserved memory have been portioned out. The @kernel_reserved is
- * always persent at the top of the memory space and the @low_hole is always at
- * the bottom.
- *
- * For certain address spaces a "user" section makes no sense (bar1, etc) so in
- * such cases the @kernel_reserved and @low_hole should sum to exactly
- * @aperture_size.
- */
-int nvgpu_init_vm(struct mm_gk20a *mm,
-                  struct vm_gk20a *vm,
-                  u32 big_page_size,
-                  u64 low_hole,
-                  u64 kernel_reserved,
-                  u64 aperture_size,
-                  bool big_pages,
-                  bool userspace_managed,
-                  char *name)
+static int __nvgpu_vm_init(struct mm_gk20a *mm,
+                           struct vm_gk20a *vm,
+                           u32 big_page_size,
+                           u64 low_hole,
+                           u64 kernel_reserved,
+                           u64 aperture_size,
+                           bool big_pages,
+                           bool userspace_managed,
+                           char *name)
 {
         int err;
         char alloc_name[32];
@@ -257,7 +220,7 @@ int nvgpu_init_vm(struct mm_gk20a *mm,
         u64 user_vma_start, user_vma_limit;
         u64 user_lp_vma_start, user_lp_vma_limit;
         u64 kernel_vma_start, kernel_vma_limit;
-        struct gk20a *g = mm->g;
+        struct gk20a *g = gk20a_from_mm(mm);
 
         if (WARN_ON(kernel_reserved + low_hole > aperture_size))
                 return -ENOMEM;
@@ -467,22 +430,71 @@ clean_up_vgpu_vm:
         return err;
 }
 
-void nvgpu_deinit_vm(struct vm_gk20a *vm)
+/**
+ * nvgpu_init_vm() - Initialize an address space.
+ *
+ * @mm - Parent MM.
+ * @vm - The VM to init.
+ * @big_page_size - Size of big pages associated with this VM.
+ * @low_hole - The size of the low hole (unaddressable memory at the bottom of
+ *             the address space).
+ * @kernel_reserved - Space reserved for kernel only allocations.
+ * @aperture_size - Total size of the aperture.
+ * @big_pages - If true then big pages are possible in the VM. Note this does
+ *              not guarantee that big pages will be possible.
+ * @name - Name of the address space.
+ *
+ * This function initializes an address space according to the following map:
+ *
+ *     +--+ 0x0
+ *     |  |
+ *     +--+ @low_hole
+ *     |  |
+ *     ~  ~   This is the "user" section.
+ *     |  |
+ *     +--+ @aperture_size - @kernel_reserved
+ *     |  |
+ *     ~  ~   This is the "kernel" section.
+ *     |  |
+ *     +--+ @aperture_size
+ *
+ * The user section is therefor what ever is left over after the @low_hole and
+ * @kernel_reserved memory have been portioned out. The @kernel_reserved is
+ * always persent at the top of the memory space and the @low_hole is always at
+ * the bottom.
+ *
+ * For certain address spaces a "user" section makes no sense (bar1, etc) so in
+ * such cases the @kernel_reserved and @low_hole should sum to exactly
+ * @aperture_size.
+ */
+struct vm_gk20a *nvgpu_vm_init(struct gk20a *g,
+                               u32 big_page_size,
+                               u64 low_hole,
+                               u64 kernel_reserved,
+                               u64 aperture_size,
+                               bool big_pages,
+                               bool userspace_managed,
+                               char *name)
 {
-        if (nvgpu_alloc_initialized(&vm->kernel))
-                nvgpu_alloc_destroy(&vm->kernel);
-        if (nvgpu_alloc_initialized(&vm->user))
-                nvgpu_alloc_destroy(&vm->user);
-        if (nvgpu_alloc_initialized(&vm->user_lp))
-                nvgpu_alloc_destroy(&vm->user_lp);
+        struct vm_gk20a *vm = nvgpu_kzalloc(g, sizeof(*vm));
 
-        gk20a_vm_free_entries(vm, &vm->pdb, 0);
+        if (!vm)
+                return NULL;
+
+        if (__nvgpu_vm_init(&g->mm, vm, big_page_size, low_hole,
+                            kernel_reserved, aperture_size, big_pages,
+                            userspace_managed, name)) {
+                nvgpu_kfree(g, vm);
+                return NULL;
+        }
+
+        return vm;
 }
 
 /*
- * Cleanup the VM but don't nvgpu_kfree() on the vm pointer.
+ * Cleanup the VM!
  */
-void __nvgpu_vm_remove(struct vm_gk20a *vm)
+static void __nvgpu_vm_remove(struct vm_gk20a *vm)
 {
         struct nvgpu_mapped_buf *mapped_buffer;
         struct nvgpu_vm_area *vm_area, *vm_area_tmp;
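
The doc comment that moves with the constructor describes how the aperture is split. In terms of the user_vma_* and kernel_vma_* locals visible in the earlier hunk, the intended relationship is roughly the following. This is a simplified sketch; the body of __nvgpu_vm_init() that actually computes these ranges (including the big-page user area and any alignment) is not part of this diff:

        /* User section: everything between the low hole and the kernel carve-out. */
        u64 user_vma_start   = low_hole;
        u64 user_vma_limit   = aperture_size - kernel_reserved;

        /* Kernel section: always at the top of the aperture. */
        u64 kernel_vma_start = aperture_size - kernel_reserved;
        u64 kernel_vma_limit = aperture_size;

        /*
         * For kernel-only address spaces (bar1, etc.) the comment requires
         * low_hole + kernel_reserved == aperture_size, which leaves the user
         * section empty: user_vma_start == user_vma_limit.
         */
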
@@ -518,7 +530,14 @@ void __nvgpu_vm_remove(struct vm_gk20a *vm)
                 nvgpu_kfree(vm->mm->g, vm_area);
         }
 
-        nvgpu_deinit_vm(vm);
+        if (nvgpu_alloc_initialized(&vm->kernel))
+                nvgpu_alloc_destroy(&vm->kernel);
+        if (nvgpu_alloc_initialized(&vm->user))
+                nvgpu_alloc_destroy(&vm->user);
+        if (nvgpu_alloc_initialized(&vm->user_lp))
+                nvgpu_alloc_destroy(&vm->user_lp);
+
+        gk20a_vm_free_entries(vm, &vm->pdb, 0);
 
 #ifdef CONFIG_TEGRA_GR_VIRTUALIZATION
         if (g->is_virtual)
@@ -526,34 +545,15 @@ void __nvgpu_vm_remove(struct vm_gk20a *vm)
 #endif
 
         nvgpu_mutex_release(&vm->update_gmmu_lock);
-}
 
-/*
- * Remove and nvgpu_kfree() the VM struct.
- */
-void nvgpu_vm_remove(struct vm_gk20a *vm)
-{
-        __nvgpu_vm_remove(vm);
-
-        nvgpu_kfree(vm->mm->g, vm);
-}
-
-/*
- * Note: this does not nvgpu_kfree() the vm. This might be a bug.
- */
-void nvgpu_vm_remove_inst(struct vm_gk20a *vm, struct nvgpu_mem *inst_block)
-{
-        struct gk20a *g = vm->mm->g;
-
-        gk20a_free_inst_block(g, inst_block);
-        __nvgpu_vm_remove(vm);
+        nvgpu_kfree(g, vm);
 }
 
 static void __nvgpu_vm_remove_kref(struct kref *ref)
 {
         struct vm_gk20a *vm = container_of(ref, struct vm_gk20a, ref);
 
-        nvgpu_vm_remove(vm);
+        __nvgpu_vm_remove(vm);
 }
 
 void nvgpu_vm_get(struct vm_gk20a *vm)
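
Taken together, the vm.c hunks move ownership of the VM's storage under its kref: nvgpu_vm_init() allocates and initializes in one step, and dropping the last reference runs __nvgpu_vm_remove(), which now also frees the struct, so the standalone nvgpu_vm_remove() and nvgpu_vm_remove_inst() paths go away. A rough lifecycle sketch under those assumptions; nvgpu_vm_put() is taken to be the existing kref_put() wrapper that ends in __nvgpu_vm_remove_kref() and is not shown in this diff:

        struct mm_gk20a *mm = &g->mm;
        struct vm_gk20a *vm;

        /* Construction: allocation + initialization in one call, NULL on failure.
         * The refcount is assumed to start at one, as the pre-existing init path
         * did; "example_as" is a hypothetical name. */
        vm = nvgpu_vm_init(g, big_page_size, big_page_size << 10,
                           mm->channel.kernel_size,
                           mm->channel.user_size + mm->channel.kernel_size,
                           !mm->disable_bigpage, false, "example_as");
        if (!vm)
                return -ENOMEM;

        /* ... use the VM: map buffers, bind channels, etc. ... */

        /*
         * Teardown: the last put runs __nvgpu_vm_remove(), which now also
         * nvgpu_kfree()s the struct, so no explicit free is needed here.
         */
        nvgpu_vm_put(vm);
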