author     Alex Waterman <alexw@nvidia.com>                    2016-10-31 15:33:54 -0400
committer  mobile promotions <svcmobile_promotions@nvidia.com> 2017-01-31 19:23:07 -0500
commit     321537b8edaa9464381c70982470124e699a054a (patch)
tree       133cf11fa36b426dfe98ca13f332dcba50f412f8 /drivers
parent     d630f1d99f60b1c2ec87506a2738bac4d1895b07 (diff)
gpu: nvgpu: Cleanup gk20a_init_vm()
Cleanup and simplify the gk20a_init_vm() function to ease the
implementation of a platform dependent address space unification
decision.

Bug 1396644
Bug 1729947

Change-Id: Id8487d0e3d3c65e3357e3528063fb17c8a85f7da
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/1265301
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Diffstat (limited to 'drivers')
-rw-r--r--   drivers/gpu/nvgpu/gk20a/mm_gk20a.c   96
1 file changed, 50 insertions(+), 46 deletions(-)
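The patch below pulls the page-directory setup out of gk20a_init_vm() into a
helper, init_vm_page_tables(), that unwinds its own partial state on failure,
leaving the caller with a single clean_up_page_tables label instead of the old
clean_up_ptes/clean_up_pdes pair. The following is a minimal standalone sketch
of that error-handling pattern, with stand-in types and allocators (calloc/free
in place of vzalloc and the GMMU calls); it illustrates the shape of the change
and is not the nvgpu API.

/* Build: cc -Wall sketch.c -o sketch */
#include <stdlib.h>

struct vm {
	long *pdb_entries;   /* stand-in for vm->pdb.entries */
	int pdb_hw_ready;    /* stand-in for the hardware page directory */
};

/*
 * The helper owns its partial state: if the second step fails it frees
 * what the first step allocated, so a failure here leaves nothing for
 * the caller to unwind (mirrors init_vm_page_tables() in the patch).
 */
static int init_vm_page_tables(struct vm *vm)
{
	vm->pdb_entries = calloc(16, sizeof(*vm->pdb_entries));
	if (!vm->pdb_entries)
		return -1;            /* -ENOMEM in the kernel */

	vm->pdb_hw_ready = 1;         /* pretend this step can fail too */
	if (!vm->pdb_hw_ready) {
		free(vm->pdb_entries);
		return -1;
	}
	return 0;
}

/* Mirrors the clean_up_page_tables label: undo both steps. */
static void free_vm_page_tables(struct vm *vm)
{
	vm->pdb_hw_ready = 0;
	free(vm->pdb_entries);
}

static int setup_allocators(struct vm *vm)
{
	(void)vm;
	return 0;                     /* pretend this later step can fail */
}

static int init_vm(struct vm *vm)
{
	int err;

	err = init_vm_page_tables(vm);
	if (err)
		return err;           /* nothing to unwind yet */

	err = setup_allocators(vm);
	if (err)
		goto clean_up_page_tables;

	return 0;

clean_up_page_tables:
	free_vm_page_tables(vm);
	return err;
}

int main(void)
{
	struct vm vm = { 0 };

	if (init_vm(&vm))
		return 1;
	free_vm_page_tables(&vm);     /* normal teardown */
	return 0;
}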
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index 83bbcb54..bf73e79f 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -4303,6 +4303,31 @@ enum gmmu_pgsz_gk20a __get_pte_size_fixed_map(struct vm_gk20a *vm,
 	return node->pgsz_idx;
 }
 
+static int init_vm_page_tables(struct vm_gk20a *vm)
+{
+	u32 pde_lo, pde_hi;
+	int err;
+
+	pde_range_from_vaddr_range(vm,
+				   0, vm->va_limit - 1,
+				   &pde_lo, &pde_hi);
+	vm->pdb.entries = vzalloc(sizeof(struct gk20a_mm_entry) *
+				  (pde_hi + 1));
+	vm->pdb.num_entries = pde_hi + 1;
+
+	if (!vm->pdb.entries)
+		return -ENOMEM;
+
+	err = gk20a_zalloc_gmmu_page_table(vm, 0, &vm->mmu_levels[0],
+					   &vm->pdb, NULL);
+	if (err) {
+		vfree(vm->pdb.entries);
+		return err;
+	}
+
+	return 0;
+}
+
 /**
  * gk20a_init_vm() - Initialize an address space.
  *
@@ -4349,62 +4374,41 @@ int gk20a_init_vm(struct mm_gk20a *mm,
 		  bool userspace_managed,
 		  char *name)
 {
-	int err, i;
+	int err;
 	char alloc_name[32];
-	u64 user_vma_start, user_vma_limit, kernel_vma_start, kernel_vma_limit;
-	u32 pde_lo, pde_hi;
+	u64 user_vma_start, user_vma_limit;
+	u64 kernel_vma_start, kernel_vma_limit;
 	struct gk20a *g = mm->g;
 
-	/* note: this must match gmmu_pgsz_gk20a enum */
-	u32 gmmu_page_sizes[gmmu_nr_page_sizes] = { SZ_4K, big_page_size, SZ_4K };
-
 	if (WARN_ON(kernel_reserved + low_hole > aperture_size))
 		return -ENOMEM;
 
+	gk20a_dbg_info("Init space for %s: va_limit=0x%llx",
+		       name, vm->va_limit);
+
 	vm->mm = mm;
 
+	vm->gmmu_page_sizes[gmmu_page_size_small]  = SZ_4K;
+	vm->gmmu_page_sizes[gmmu_page_size_big]    = big_page_size;
+	vm->gmmu_page_sizes[gmmu_page_size_kernel] = SZ_4K;
+
 	/* Set up vma pointers. */
 	vm->vma[0] = &vm->user;
 	vm->vma[1] = &vm->user;
 	vm->vma[2] = &vm->kernel;
 
 	vm->va_start = low_hole;
 	vm->va_limit = aperture_size;
 	vm->big_pages = big_pages;
 
-	vm->big_page_size = gmmu_page_sizes[gmmu_page_size_big];
+	vm->big_page_size = vm->gmmu_page_sizes[gmmu_page_size_big];
 	vm->userspace_managed = userspace_managed;
-	vm->mmu_levels = vm->mm->g->ops.mm.get_mmu_levels(vm->mm->g,
-			vm->big_page_size);
-
-	for (i = 0; i < gmmu_nr_page_sizes; i++)
-		vm->gmmu_page_sizes[i] = gmmu_page_sizes[i];
+	vm->mmu_levels = g->ops.mm.get_mmu_levels(g, big_page_size);
 
-	gk20a_dbg_info("small page-size (%dKB)",
-		       vm->gmmu_page_sizes[gmmu_page_size_small] >> 10);
-	gk20a_dbg_info("big page-size (%dKB) (%s)\n",
-		       vm->gmmu_page_sizes[gmmu_page_size_big] >> 10, name);
-	gk20a_dbg_info("kernel page-size (%dKB)",
-		       vm->gmmu_page_sizes[gmmu_page_size_kernel] >> 10);
-
-	pde_range_from_vaddr_range(vm,
-				   0, vm->va_limit-1,
-				   &pde_lo, &pde_hi);
-	vm->pdb.entries = vzalloc(sizeof(struct gk20a_mm_entry) *
-				  (pde_hi + 1));
-	vm->pdb.num_entries = pde_hi + 1;
-
-	if (!vm->pdb.entries)
-		return -ENOMEM;
-
-	gk20a_dbg_info("init space for %s va_limit=0x%llx num_pdes=%d",
-		       name, vm->va_limit, pde_hi + 1);
-
-	/* allocate the page table directory */
-	err = gk20a_zalloc_gmmu_page_table(vm, 0, &vm->mmu_levels[0],
-					   &vm->pdb, NULL);
+	/* Initialize the page table data structures. */
+	err = init_vm_page_tables(vm);
 	if (err)
-		goto clean_up_pdes;
+		return err;
 
 	/* setup vma limits */
 	user_vma_start = low_hole;
@@ -4427,10 +4431,10 @@ int gk20a_init_vm(struct mm_gk20a *mm,
 	 * user_vma_limit (i.e a 0 sized space). In such a situation the kernel
 	 * area must be non-zero in length.
 	 */
-	if (user_vma_start > user_vma_limit ||
+	if (user_vma_start >= user_vma_limit &&
 	    kernel_vma_start >= kernel_vma_limit) {
 		err = -EINVAL;
-		goto clean_up_pdes;
+		goto clean_up_page_tables;
 	}
 
 	/*
@@ -4439,7 +4443,7 @@ int gk20a_init_vm(struct mm_gk20a *mm,
 	if (g->separate_fixed_allocs &&
 	    user_vma_start < user_vma_limit) {
 		if (g->separate_fixed_allocs >= user_vma_limit)
-			goto clean_up_pdes;
+			goto clean_up_page_tables;
 
 		snprintf(alloc_name, sizeof(alloc_name),
 			 "gk20a_%s-fixed", name);
@@ -4452,7 +4456,7 @@ int gk20a_init_vm(struct mm_gk20a *mm,
 				      GPU_BALLOC_MAX_ORDER,
 				      GPU_ALLOC_GVA_SPACE);
 		if (err)
-			goto clean_up_ptes;
+			goto clean_up_page_tables;
 
 		/* Make sure to update the user vma size. */
 		user_vma_start = g->separate_fixed_allocs;
@@ -4473,7 +4477,7 @@ int gk20a_init_vm(struct mm_gk20a *mm,
 					   GPU_BALLOC_MAX_ORDER,
 					   GPU_ALLOC_GVA_SPACE);
 		if (err)
-			goto clean_up_ptes;
+			goto clean_up_page_tables;
 	} else {
 		/*
 		 * Make these allocator pointers point to the kernel allocator
@@ -4521,10 +4525,10 @@ int gk20a_init_vm(struct mm_gk20a *mm,
 clean_up_user_allocator:
 	if (user_vma_start < user_vma_limit)
 		nvgpu_alloc_destroy(&vm->user);
-clean_up_ptes:
-	free_gmmu_pages(vm, &vm->pdb);
-clean_up_pdes:
+clean_up_page_tables:
+	/* Cleans up init_vm_page_tables() */
 	vfree(vm->pdb.entries);
+	free_gmmu_pages(vm, &vm->pdb);
 	return err;
 }
 