summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/common/mm/vm.c
diff options
context:
space:
mode:
authorAlex Waterman <alexw@nvidia.com>2017-05-05 18:00:23 -0400
committermobile promotions <svcmobile_promotions@nvidia.com>2017-05-26 06:33:57 -0400
commit0bb47c3675d2030545d40353931e2b8120541de4 (patch)
tree1a23b45c1ac19dbc98e1d4a585822eb47b7dfeb2 /drivers/gpu/nvgpu/common/mm/vm.c
parentfbafc7eba41ba7654dfdadf51a53acf1638e9fa1 (diff)
gpu: nvgpu: Add and use VM init/deinit APIs
Remove the VM init/de-init from the HAL and instead use a single set of routines that init/de-init VMs. This prevents code divergence between vGPUs and regular GPUs. This patch also clears up the naming of the routines a little bit. Since some VMs are used in place and others are dynamically allocated, the APIs for freeing them were confusing. Some free calls also clean up an instance block (this is API abuse - but this is how it currently exists). The new API looks like this: void __nvgpu_vm_remove(struct vm_gk20a *vm); void nvgpu_vm_remove(struct vm_gk20a *vm); void nvgpu_vm_remove_inst(struct vm_gk20a *vm, struct nvgpu_mem *inst_block); void nvgpu_vm_remove_vgpu(struct vm_gk20a *vm); int nvgpu_init_vm(struct mm_gk20a *mm, struct vm_gk20a *vm, u32 big_page_size, u64 low_hole, u64 kernel_reserved, u64 aperture_size, bool big_pages, bool userspace_managed, char *name); void nvgpu_deinit_vm(struct vm_gk20a *vm); JIRA NVGPU-12 JIRA NVGPU-30 Change-Id: Ia4016384c54746bfbcaa4bdd0d29d03d5d7f7f1b Signed-off-by: Alex Waterman <alexw@nvidia.com> Reviewed-on: http://git-master/r/1477747 Reviewed-by: Automatic_Commit_Validation_User GVS: Gerrit_Virtual_Submit Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/common/mm/vm.c')
-rw-r--r--drivers/gpu/nvgpu/common/mm/vm.c57
1 file changed, 41 insertions, 16 deletions
diff --git a/drivers/gpu/nvgpu/common/mm/vm.c b/drivers/gpu/nvgpu/common/mm/vm.c
index e42c7c5a..bdc8554c 100644
--- a/drivers/gpu/nvgpu/common/mm/vm.c
+++ b/drivers/gpu/nvgpu/common/mm/vm.c
@@ -442,7 +442,22 @@ clean_up_page_tables:
442 return err; 442 return err;
443} 443}
444 444
445void nvgpu_vm_remove_support_nofree(struct vm_gk20a *vm) 445void nvgpu_deinit_vm(struct vm_gk20a *vm)
446{
447 if (nvgpu_alloc_initialized(&vm->kernel))
448 nvgpu_alloc_destroy(&vm->kernel);
449 if (nvgpu_alloc_initialized(&vm->user))
450 nvgpu_alloc_destroy(&vm->user);
451 if (nvgpu_alloc_initialized(&vm->user_lp))
452 nvgpu_alloc_destroy(&vm->user_lp);
453
454 gk20a_vm_free_entries(vm, &vm->pdb, 0);
455}
456
457/*
458 * Cleanup the VM but don't nvgpu_kfree() on the vm pointer.
459 */
460void __nvgpu_vm_remove(struct vm_gk20a *vm)
446{ 461{
447 struct nvgpu_mapped_buf *mapped_buffer; 462 struct nvgpu_mapped_buf *mapped_buffer;
448 struct nvgpu_vm_area *vm_area, *vm_area_tmp; 463 struct nvgpu_vm_area *vm_area, *vm_area_tmp;
@@ -480,22 +495,40 @@ void nvgpu_vm_remove_support_nofree(struct vm_gk20a *vm)
480 495
481 nvgpu_deinit_vm(vm); 496 nvgpu_deinit_vm(vm);
482 497
498#ifdef CONFIG_TEGRA_GR_VIRTUALIZATION
499 if (g->is_virtual)
500 nvgpu_vm_remove_vgpu(vm);
501#endif
502
483 nvgpu_mutex_release(&vm->update_gmmu_lock); 503 nvgpu_mutex_release(&vm->update_gmmu_lock);
484} 504}
485 505
486void nvgpu_vm_remove_support(struct vm_gk20a *vm) 506/*
507 * Remove and nvgpu_kfree() the VM struct.
508 */
509void nvgpu_vm_remove(struct vm_gk20a *vm)
487{ 510{
488 nvgpu_vm_remove_support_nofree(vm); 511 __nvgpu_vm_remove(vm);
489 /* vm is not used anymore. release it. */ 512
490 nvgpu_kfree(vm->mm->g, vm); 513 nvgpu_kfree(vm->mm->g, vm);
491} 514}
492 515
493static void nvgpu_vm_remove_support_kref(struct kref *ref) 516/*
517 * Note: this does not nvgpu_kfree() the vm. This might be a bug.
518 */
519void nvgpu_vm_remove_inst(struct vm_gk20a *vm, struct nvgpu_mem *inst_block)
520{
521 struct gk20a *g = vm->mm->g;
522
523 gk20a_free_inst_block(g, inst_block);
524 __nvgpu_vm_remove(vm);
525}
526
527static void __nvgpu_vm_remove_kref(struct kref *ref)
494{ 528{
495 struct vm_gk20a *vm = container_of(ref, struct vm_gk20a, ref); 529 struct vm_gk20a *vm = container_of(ref, struct vm_gk20a, ref);
496 struct gk20a *g = gk20a_from_vm(vm);
497 530
498 g->ops.mm.vm_remove(vm); 531 nvgpu_vm_remove(vm);
499} 532}
500 533
501void nvgpu_vm_get(struct vm_gk20a *vm) 534void nvgpu_vm_get(struct vm_gk20a *vm)
@@ -505,15 +538,7 @@ void nvgpu_vm_get(struct vm_gk20a *vm)
505 538
506void nvgpu_vm_put(struct vm_gk20a *vm) 539void nvgpu_vm_put(struct vm_gk20a *vm)
507{ 540{
508 kref_put(&vm->ref, nvgpu_vm_remove_support_kref); 541 kref_put(&vm->ref, __nvgpu_vm_remove_kref);
509}
510
511void nvgpu_vm_remove(struct vm_gk20a *vm, struct nvgpu_mem *inst_block)
512{
513 struct gk20a *g = vm->mm->g;
514
515 gk20a_free_inst_block(g, inst_block);
516 nvgpu_vm_remove_support_nofree(vm);
517} 542}
518 543
519int nvgpu_insert_mapped_buf(struct vm_gk20a *vm, 544int nvgpu_insert_mapped_buf(struct vm_gk20a *vm,