summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/common/mm/vm.c
diff options
context:
space:
mode:
authorAlex Waterman <alexw@nvidia.com>2017-05-09 21:34:54 -0400
committermobile promotions <svcmobile_promotions@nvidia.com>2017-06-06 20:09:16 -0400
commitc21f5bca9ae81804130e30ea3e6f7a18d51203dc (patch)
treefb1a2d67532df19d70468610ad2a62c3464876c1 /drivers/gpu/nvgpu/common/mm/vm.c
parentc2b63150cd947557b8d17637258b988459b8e0ec (diff)
gpu: nvgpu: Remove extraneous VM init/deinit APIs
Support only VM pointers and ref-counting for maintaining VMs. This dramatically reduces the complexity of the APIs, avoids the API abuse that has existed, and ensures that future VM usage is consistent with current usage. Also remove the combined VM free/instance block deletion. Any place where this was done is now replaced with an explict free of the instance block and a nvgpu_vm_put(). JIRA NVGPU-12 JIRA NVGPU-30 Change-Id: Ib73e8d574ecc9abf6dad0b40a2c5795d6396cc8c Signed-off-by: Alex Waterman <alexw@nvidia.com> Reviewed-on: http://git-master/r/1480227 Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com> Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/common/mm/vm.c')
-rw-r--r--drivers/gpu/nvgpu/common/mm/vm.c158
1 files changed, 79 insertions, 79 deletions
diff --git a/drivers/gpu/nvgpu/common/mm/vm.c b/drivers/gpu/nvgpu/common/mm/vm.c
index 171a67ca..e24d40bf 100644
--- a/drivers/gpu/nvgpu/common/mm/vm.c
+++ b/drivers/gpu/nvgpu/common/mm/vm.c
@@ -204,52 +204,15 @@ static int nvgpu_init_sema_pool(struct vm_gk20a *vm)
204 return 0; 204 return 0;
205} 205}
206 206
207/** 207static int __nvgpu_vm_init(struct mm_gk20a *mm,
208 * nvgpu_init_vm() - Initialize an address space. 208 struct vm_gk20a *vm,
209 * 209 u32 big_page_size,
210 * @mm - Parent MM. 210 u64 low_hole,
211 * @vm - The VM to init. 211 u64 kernel_reserved,
212 * @big_page_size - Size of big pages associated with this VM. 212 u64 aperture_size,
213 * @low_hole - The size of the low hole (unaddressable memory at the bottom of 213 bool big_pages,
214 * the address space). 214 bool userspace_managed,
215 * @kernel_reserved - Space reserved for kernel only allocations. 215 char *name)
216 * @aperture_size - Total size of the aperture.
217 * @big_pages - If true then big pages are possible in the VM. Note this does
218 * not guarantee that big pages will be possible.
219 * @name - Name of the address space.
220 *
221 * This function initializes an address space according to the following map:
222 *
223 * +--+ 0x0
224 * | |
225 * +--+ @low_hole
226 * | |
227 * ~ ~ This is the "user" section.
228 * | |
229 * +--+ @aperture_size - @kernel_reserved
230 * | |
231 * ~ ~ This is the "kernel" section.
232 * | |
233 * +--+ @aperture_size
234 *
235 * The user section is therefore whatever is left over after the @low_hole and
236 * @kernel_reserved memory have been portioned out. The @kernel_reserved is
237 * always present at the top of the memory space and the @low_hole is always at
238 * the bottom.
239 *
240 * For certain address spaces a "user" section makes no sense (bar1, etc) so in
241 * such cases the @kernel_reserved and @low_hole should sum to exactly
242 * @aperture_size.
243 */
244int nvgpu_init_vm(struct mm_gk20a *mm,
245 struct vm_gk20a *vm,
246 u32 big_page_size,
247 u64 low_hole,
248 u64 kernel_reserved,
249 u64 aperture_size,
250 bool big_pages,
251 bool userspace_managed,
252 char *name)
253{ 216{
254 int err; 217 int err;
255 char alloc_name[32]; 218 char alloc_name[32];
@@ -257,7 +220,7 @@ int nvgpu_init_vm(struct mm_gk20a *mm,
257 u64 user_vma_start, user_vma_limit; 220 u64 user_vma_start, user_vma_limit;
258 u64 user_lp_vma_start, user_lp_vma_limit; 221 u64 user_lp_vma_start, user_lp_vma_limit;
259 u64 kernel_vma_start, kernel_vma_limit; 222 u64 kernel_vma_start, kernel_vma_limit;
260 struct gk20a *g = mm->g; 223 struct gk20a *g = gk20a_from_mm(mm);
261 224
262 if (WARN_ON(kernel_reserved + low_hole > aperture_size)) 225 if (WARN_ON(kernel_reserved + low_hole > aperture_size))
263 return -ENOMEM; 226 return -ENOMEM;
@@ -467,22 +430,71 @@ clean_up_vgpu_vm:
467 return err; 430 return err;
468} 431}
469 432
470void nvgpu_deinit_vm(struct vm_gk20a *vm) 433/**
434 * nvgpu_init_vm() - Initialize an address space.
435 *
436 * @mm - Parent MM.
437 * @vm - The VM to init.
438 * @big_page_size - Size of big pages associated with this VM.
439 * @low_hole - The size of the low hole (unaddressable memory at the bottom of
440 * the address space).
441 * @kernel_reserved - Space reserved for kernel only allocations.
442 * @aperture_size - Total size of the aperture.
443 * @big_pages - If true then big pages are possible in the VM. Note this does
444 * not guarantee that big pages will be possible.
445 * @name - Name of the address space.
446 *
447 * This function initializes an address space according to the following map:
448 *
449 * +--+ 0x0
450 * | |
451 * +--+ @low_hole
452 * | |
453 * ~ ~ This is the "user" section.
454 * | |
455 * +--+ @aperture_size - @kernel_reserved
456 * | |
457 * ~ ~ This is the "kernel" section.
458 * | |
459 * +--+ @aperture_size
460 *
461 * The user section is therefore whatever is left over after the @low_hole and
462 * @kernel_reserved memory have been portioned out. The @kernel_reserved is
463 * always present at the top of the memory space and the @low_hole is always at
464 * the bottom.
465 *
466 * For certain address spaces a "user" section makes no sense (bar1, etc) so in
467 * such cases the @kernel_reserved and @low_hole should sum to exactly
468 * @aperture_size.
469 */
470struct vm_gk20a *nvgpu_vm_init(struct gk20a *g,
471 u32 big_page_size,
472 u64 low_hole,
473 u64 kernel_reserved,
474 u64 aperture_size,
475 bool big_pages,
476 bool userspace_managed,
477 char *name)
471{ 478{
472 if (nvgpu_alloc_initialized(&vm->kernel)) 479 struct vm_gk20a *vm = nvgpu_kzalloc(g, sizeof(*vm));
473 nvgpu_alloc_destroy(&vm->kernel);
474 if (nvgpu_alloc_initialized(&vm->user))
475 nvgpu_alloc_destroy(&vm->user);
476 if (nvgpu_alloc_initialized(&vm->user_lp))
477 nvgpu_alloc_destroy(&vm->user_lp);
478 480
479 gk20a_vm_free_entries(vm, &vm->pdb, 0); 481 if (!vm)
482 return NULL;
483
484 if (__nvgpu_vm_init(&g->mm, vm, big_page_size, low_hole,
485 kernel_reserved, aperture_size, big_pages,
486 userspace_managed, name)) {
487 nvgpu_kfree(g, vm);
488 return NULL;
489 }
490
491 return vm;
480} 492}
481 493
482/* 494/*
483 * Cleanup the VM but don't nvgpu_kfree() on the vm pointer. 495 * Cleanup the VM!
484 */ 496 */
485void __nvgpu_vm_remove(struct vm_gk20a *vm) 497static void __nvgpu_vm_remove(struct vm_gk20a *vm)
486{ 498{
487 struct nvgpu_mapped_buf *mapped_buffer; 499 struct nvgpu_mapped_buf *mapped_buffer;
488 struct nvgpu_vm_area *vm_area, *vm_area_tmp; 500 struct nvgpu_vm_area *vm_area, *vm_area_tmp;
@@ -518,7 +530,14 @@ void __nvgpu_vm_remove(struct vm_gk20a *vm)
518 nvgpu_kfree(vm->mm->g, vm_area); 530 nvgpu_kfree(vm->mm->g, vm_area);
519 } 531 }
520 532
521 nvgpu_deinit_vm(vm); 533 if (nvgpu_alloc_initialized(&vm->kernel))
534 nvgpu_alloc_destroy(&vm->kernel);
535 if (nvgpu_alloc_initialized(&vm->user))
536 nvgpu_alloc_destroy(&vm->user);
537 if (nvgpu_alloc_initialized(&vm->user_lp))
538 nvgpu_alloc_destroy(&vm->user_lp);
539
540 gk20a_vm_free_entries(vm, &vm->pdb, 0);
522 541
523#ifdef CONFIG_TEGRA_GR_VIRTUALIZATION 542#ifdef CONFIG_TEGRA_GR_VIRTUALIZATION
524 if (g->is_virtual) 543 if (g->is_virtual)
@@ -526,34 +545,15 @@ void __nvgpu_vm_remove(struct vm_gk20a *vm)
526#endif 545#endif
527 546
528 nvgpu_mutex_release(&vm->update_gmmu_lock); 547 nvgpu_mutex_release(&vm->update_gmmu_lock);
529}
530 548
531/* 549 nvgpu_kfree(g, vm);
532 * Remove and nvgpu_kfree() the VM struct.
533 */
534void nvgpu_vm_remove(struct vm_gk20a *vm)
535{
536 __nvgpu_vm_remove(vm);
537
538 nvgpu_kfree(vm->mm->g, vm);
539}
540
541/*
542 * Note: this does not nvgpu_kfree() the vm. This might be a bug.
543 */
544void nvgpu_vm_remove_inst(struct vm_gk20a *vm, struct nvgpu_mem *inst_block)
545{
546 struct gk20a *g = vm->mm->g;
547
548 gk20a_free_inst_block(g, inst_block);
549 __nvgpu_vm_remove(vm);
550} 550}
551 551
552static void __nvgpu_vm_remove_kref(struct kref *ref) 552static void __nvgpu_vm_remove_kref(struct kref *ref)
553{ 553{
554 struct vm_gk20a *vm = container_of(ref, struct vm_gk20a, ref); 554 struct vm_gk20a *vm = container_of(ref, struct vm_gk20a, ref);
555 555
556 nvgpu_vm_remove(vm); 556 __nvgpu_vm_remove(vm);
557} 557}
558 558
559void nvgpu_vm_get(struct vm_gk20a *vm) 559void nvgpu_vm_get(struct vm_gk20a *vm)