author      Alex Waterman <alexw@nvidia.com>    2016-10-31 17:40:19 -0400
committer   mobile promotions <svcmobile_promotions@nvidia.com>    2017-01-31 19:23:07 -0500
commit      b9b94c073ce551935be1c00cb8e756ad5ce5c631 (patch)
tree        86e4c2b750e9a8eb0d23006ce4dce99906d95ab6 /drivers/gpu/nvgpu/gk20a/mm_gk20a.c
parent      321537b8edaa9464381c70982470124e699a054a (diff)
gpu: nvgpu: Remove separate fixed address VMA
Remove the special VMA that could be used for allocating fixed
addresses. This feature was never used and is not worth maintaining.

Bug 1396644
Bug 1729947

Change-Id: I06f92caa01623535516935acc03ce38dbdb0e318
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/1265302
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/mm_gk20a.c')
-rw-r--r--    drivers/gpu/nvgpu/gk20a/mm_gk20a.c    42
1 file changed, 3 insertions(+), 39 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index bf73e79f..b04a7e87 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -4437,31 +4437,6 @@ int gk20a_init_vm(struct mm_gk20a *mm,
 		goto clean_up_page_tables;
 	}
 
-	/*
-	 * Attempt to make a separate VM for fixed allocations.
-	 */
-	if (g->separate_fixed_allocs &&
-	    user_vma_start < user_vma_limit) {
-		if (g->separate_fixed_allocs >= user_vma_limit)
-			goto clean_up_page_tables;
-
-		snprintf(alloc_name, sizeof(alloc_name),
-			 "gk20a_%s-fixed", name);
-
-		err = __nvgpu_buddy_allocator_init(g, &vm->fixed,
-						   vm, alloc_name,
-						   user_vma_start,
-						   g->separate_fixed_allocs,
-						   SZ_4K,
-						   GPU_BALLOC_MAX_ORDER,
-						   GPU_ALLOC_GVA_SPACE);
-		if (err)
-			goto clean_up_page_tables;
-
-		/* Make sure to update the user vma size. */
-		user_vma_start = g->separate_fixed_allocs;
-	}
-
 	if (user_vma_start < user_vma_limit) {
 		snprintf(alloc_name, sizeof(alloc_name), "gk20a_%s", name);
 		if (!gk20a_big_pages_possible(vm, user_vma_start,
@@ -4631,18 +4606,15 @@ int gk20a_vm_alloc_space(struct gk20a_as_share *as_share,
 	}
 
 	vma = vm->vma[pgsz_idx];
-	if (args->flags & NVGPU_AS_ALLOC_SPACE_FLAGS_FIXED_OFFSET) {
-		if (nvgpu_alloc_initialized(&vm->fixed))
-			vma = &vm->fixed;
+	if (args->flags & NVGPU_AS_ALLOC_SPACE_FLAGS_FIXED_OFFSET)
 		vaddr_start = nvgpu_alloc_fixed(vma, args->o_a.offset,
 						(u64)args->pages *
 						(u64)args->page_size,
 						args->page_size);
-	} else {
+	else
 		vaddr_start = nvgpu_alloc(vma,
 					  (u64)args->pages *
 					  (u64)args->page_size);
-	}
 
 	if (!vaddr_start) {
 		kfree(va_node);
@@ -4710,10 +4682,7 @@ int gk20a_vm_free_space(struct gk20a_as_share *as_share,
 	pgsz_idx = __get_pte_size(vm, args->offset,
 				  args->page_size * args->pages);
 
-	if (nvgpu_alloc_initialized(&vm->fixed))
-		vma = &vm->fixed;
-	else
-		vma = vm->vma[pgsz_idx];
+	vma = vm->vma[pgsz_idx];
 	nvgpu_free(vma, args->offset);
 
 	mutex_lock(&vm->update_gmmu_lock);
@@ -4902,8 +4871,6 @@ void gk20a_deinit_vm(struct vm_gk20a *vm)
 	nvgpu_alloc_destroy(&vm->kernel);
 	if (nvgpu_alloc_initialized(&vm->user))
 		nvgpu_alloc_destroy(&vm->user);
-	if (nvgpu_alloc_initialized(&vm->fixed))
-		nvgpu_alloc_destroy(&vm->fixed);
 
 	gk20a_vm_free_entries(vm, &vm->pdb, 0);
 }
@@ -5467,9 +5434,6 @@ void gk20a_mm_debugfs_init(struct device *dev)
 	struct dentry *gpu_root = platform->debugfs;
 	struct gk20a *g = gk20a_get_platform(dev)->g;
 
-	debugfs_create_x64("separate_fixed_allocs", 0664, gpu_root,
-			   &g->separate_fixed_allocs);
-
 	debugfs_create_bool("force_pramin", 0664, gpu_root,
 			    &g->mm.force_pramin);
 }
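
For reference, a minimal sketch (not part of the patch) of the allocation path in gk20a_vm_alloc_space() after this change. The vm->vma[pgsz_idx] lookup and the nvgpu_alloc_fixed()/nvgpu_alloc() calls are taken from the hunk above; the type names (struct vm_gk20a, struct nvgpu_allocator, struct nvgpu_as_alloc_space_args) are assumed from the in-tree nvgpu headers, and locking/error handling is omitted.

/*
 * Sketch only: with the dedicated vm->fixed allocator gone, both fixed-offset
 * and regular address-space requests are served by the per-page-size VMA
 * allocator. Assumes the nvgpu in-tree types; error paths omitted.
 */
static u64 alloc_space_sketch(struct vm_gk20a *vm, u32 pgsz_idx,
			      struct nvgpu_as_alloc_space_args *args)
{
	struct nvgpu_allocator *vma = vm->vma[pgsz_idx];
	u64 size = (u64)args->pages * (u64)args->page_size;

	if (args->flags & NVGPU_AS_ALLOC_SPACE_FLAGS_FIXED_OFFSET)
		/* Caller-chosen GPU VA: reserve exactly args->o_a.offset. */
		return nvgpu_alloc_fixed(vma, args->o_a.offset, size,
					 args->page_size);

	/* Otherwise let the allocator pick the GPU VA. */
	return nvgpu_alloc(vma, size);
}

The same simplification is visible in gk20a_vm_free_space() and gk20a_deinit_vm(): with vm->fixed removed, frees always return to vm->vma[pgsz_idx], and teardown only has to destroy the user and kernel allocators.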