From d630f1d99f60b1c2ec87506a2738bac4d1895b07 Mon Sep 17 00:00:00 2001
From: Alex Waterman
Date: Wed, 17 Jun 2015 10:31:08 -0700
Subject: gpu: nvgpu: Unify the small and large page address spaces

The basic structure of this patch is to make the small page allocator and
the large page allocator into pointers (where they used to be just structs)
and then point each of them at the same actual allocator, since the buddy
allocator has supported mixed page sizes since its inception.

For the rest of the driver, some changes had to be made in order to
actually support mixed pages in a single address space.

1. Unifying the allocation page size determination

Since the allocation and map operations happen at distinct times, both
mapping and allocation of GVA space must agree on page size. This is
because the allocator has to keep allocations of different page sizes in
separate PDEs, to avoid the need to support mixed PDEs. To this end a
function __get_pte_size() was introduced; it is used both by the balloc
code and the core GPU MM code, and it determines page size based only on
the length of the mapping/allocation.

2. Fixed address allocation + page size

Similar to regular mappings/GVA allocations, fixed address mapping page
size determination had to be modified. In the past the address of the
mapping determined the page size, since the address space was split by
address (low addresses were small pages, high addresses large pages).
Since that is no longer the case, the page size field in the reserve
memory ioctl is now honored by the mapping code. When, for instance, CUDA
makes a memory reservation, it specifies small or large pages. When CUDA
then requests mappings within that address range, the page size is looked
up in the reserved memory struct.

Fixed address reservations were also modified to always allocate at PDE
granularity (64M or 128M depending on the large page size). This prevents
non-fixed allocations from ending up in the same PDE and causing kernel
panics or GMMU faults.

3. The rest...

The rest of the changes are just byproducts of the above. Lots of places
required minor updates to use a pointer to the GVA allocator struct
instead of the struct itself.

Lastly, this change is not truly complete. More work remains to be done
in order to fully remove the notion that there were separate address
spaces for different page sizes. Basically, what remains after this patch
is cleanup and proper documentation.
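To make points 1 and 2 concrete, here is a minimal, self-contained sketch
of the two ideas. This is NOT the actual __get_pte_size() or reservation
code; all names, struct fields, and the single big-page threshold below
are assumptions chosen only for illustration.

/*
 * Illustrative sketch (assumed names/fields, not the driver's code):
 * (a) choose the GMMU page size purely from the allocation length, so the
 *     allocator and the map path always agree;
 * (b) round fixed reservations out to whole PDEs so small- and large-page
 *     mappings never share a PDE.
 */
#include <stdbool.h>
#include <stdint.h>

enum pgsz_sketch {
	PGSZ_SMALL,	/* e.g. 4K pages */
	PGSZ_BIG,	/* e.g. 64K/128K pages */
};

struct vm_sketch {
	bool big_pages;		/* VM created with big-page support */
	uint64_t big_page_size;	/* big page size in bytes (assumed field) */
	uint64_t pde_size;	/* PDE coverage, e.g. 64M or 128M (assumed) */
};

/* (a) Page size chosen from length alone. */
static enum pgsz_sketch pte_size_sketch(const struct vm_sketch *vm,
					uint64_t len)
{
	if (!vm->big_pages)
		return PGSZ_SMALL;
	/* Assumed policy: big pages only when at least one big page fits. */
	return (len >= vm->big_page_size) ? PGSZ_BIG : PGSZ_SMALL;
}

/* (b) Fixed reservations padded out to PDE boundaries. */
static void reserve_align_sketch(const struct vm_sketch *vm,
				 uint64_t *base, uint64_t *size)
{
	uint64_t pde = vm->pde_size;	/* assumed power of two */
	uint64_t end = *base + *size;

	*base = *base & ~(pde - 1);		/* round base down */
	end = (end + pde - 1) & ~(pde - 1);	/* round end up */
	*size = end - *base;
}

The point of (a) is that the balloc code and the map code can call the
same length-based helper and land in the same PDE bucket; (b) shows why a
fixed reservation is padded to PDE boundaries so no non-fixed allocation
of a different page size can share its PDE.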
Bug 1396644
Bug 1729947

Change-Id: If51ab396a37ba16c69e434adb47edeef083dce57
Signed-off-by: Alex Waterman
Reviewed-on: http://git-master/r/1265300
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom
---
 drivers/gpu/nvgpu/vgpu/mm_vgpu.c | 115 +++++++++++++++++----------------------
 1 file changed, 49 insertions(+), 66 deletions(-)

(limited to 'drivers/gpu/nvgpu/vgpu/mm_vgpu.c')

diff --git a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
index 66c9344b..a21a020d 100644
--- a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
@@ -227,11 +227,12 @@ static void vgpu_vm_remove_support(struct vm_gk20a *vm)
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	WARN_ON(err || msg.ret);
 
-	nvgpu_alloc_destroy(&vm->vma[gmmu_page_size_kernel]);
-	if (nvgpu_alloc_initialized(&vm->vma[gmmu_page_size_small]))
-		nvgpu_alloc_destroy(&vm->vma[gmmu_page_size_small]);
-	if (nvgpu_alloc_initialized(&vm->vma[gmmu_page_size_big]))
-		nvgpu_alloc_destroy(&vm->vma[gmmu_page_size_big]);
+	if (nvgpu_alloc_initialized(&vm->kernel))
+		nvgpu_alloc_destroy(&vm->kernel);
+	if (nvgpu_alloc_initialized(&vm->user))
+		nvgpu_alloc_destroy(&vm->user);
+	if (nvgpu_alloc_initialized(&vm->fixed))
+		nvgpu_alloc_destroy(&vm->fixed);
 
 	mutex_unlock(&vm->update_gmmu_lock);
 
@@ -273,8 +274,7 @@ static int vgpu_vm_alloc_share(struct gk20a_as_share *as_share,
 	struct tegra_vgpu_as_share_params *p = &msg.params.as_share;
 	struct mm_gk20a *mm = &g->mm;
 	struct vm_gk20a *vm;
-	u64 small_vma_start, small_vma_limit, large_vma_start, large_vma_limit,
-		kernel_vma_start, kernel_vma_limit;
+	u64 user_vma_start, user_vma_limit, kernel_vma_start, kernel_vma_limit;
 	char name[32];
 	int err, i;
 	const bool userspace_managed =
@@ -306,6 +306,11 @@ static int vgpu_vm_alloc_share(struct gk20a_as_share *as_share,
 	vm->mm = mm;
 	vm->as_share = as_share;
 
+	/* Set up vma pointers. */
+	vm->vma[0] = &vm->user;
+	vm->vma[1] = &vm->user;
+	vm->vma[2] = &vm->kernel;
+
 	for (i = 0; i < gmmu_nr_page_sizes; i++)
 		vm->gmmu_page_sizes[i] = gmmu_page_sizes[i];
 
@@ -328,93 +333,74 @@ static int vgpu_vm_alloc_share(struct gk20a_as_share *as_share,
 	vm->handle = p->handle;
 
 	/* setup vma limits */
-	small_vma_start = vm->va_start;
-
-	if (vm->big_pages) {
-		/* First 16GB of the address space goes towards small
-		 * pages. The kernel reserved pages are at the end.
-		 * What ever remains is allocated to large pages.
-		 */
-		small_vma_limit = __nv_gmmu_va_small_page_limit();
-		large_vma_start = small_vma_limit;
-		large_vma_limit = vm->va_limit - mm->channel.kernel_size;
-	} else {
-		small_vma_limit = vm->va_limit - mm->channel.kernel_size;
-		large_vma_start = 0;
-		large_vma_limit = 0;
-	}
+	user_vma_start = vm->va_start;
+	user_vma_limit = vm->va_limit - mm->channel.kernel_size;
 
 	kernel_vma_start = vm->va_limit - mm->channel.kernel_size;
 	kernel_vma_limit = vm->va_limit;
 
 	gk20a_dbg_info(
-		"small_vma=[0x%llx,0x%llx) large_vma=[0x%llx,0x%llx) kernel_vma=[0x%llx,0x%llx)\n",
-		small_vma_start, small_vma_limit,
-		large_vma_start, large_vma_limit,
+		"user_vma=[0x%llx,0x%llx) kernel_vma=[0x%llx,0x%llx)\n",
+		user_vma_start, user_vma_limit,
 		kernel_vma_start, kernel_vma_limit);
 
-	/* check that starts do not exceed limits */
-	WARN_ON(small_vma_start > small_vma_limit);
-	WARN_ON(large_vma_start > large_vma_limit);
-	/* kernel_vma must also be non-zero */
+	WARN_ON(user_vma_start > user_vma_limit);
 	WARN_ON(kernel_vma_start >= kernel_vma_limit);
 
-	if (small_vma_start > small_vma_limit ||
-	    large_vma_start > large_vma_limit ||
+	if (user_vma_start > user_vma_limit ||
 	    kernel_vma_start >= kernel_vma_limit) {
 		err = -EINVAL;
 		goto clean_up_share;
 	}
 
-	if (small_vma_start < small_vma_limit) {
+	if (user_vma_start < user_vma_limit) {
 		snprintf(name, sizeof(name), "gk20a_as_%d-%dKB", as_share->id,
 			 gmmu_page_sizes[gmmu_page_size_small] >> 10);
+		if (!gk20a_big_pages_possible(vm, user_vma_start,
+					      user_vma_limit - user_vma_start))
+			vm->big_pages = false;
 
 		err = __nvgpu_buddy_allocator_init(
 			g,
-			&vm->vma[gmmu_page_size_small],
+			vm->vma[gmmu_page_size_small],
 			vm, name,
-			small_vma_start,
-			small_vma_limit - small_vma_start,
+			user_vma_start,
+			user_vma_limit - user_vma_start,
 			SZ_4K,
 			GPU_BALLOC_MAX_ORDER,
 			GPU_ALLOC_GVA_SPACE);
 		if (err)
 			goto clean_up_share;
-	}
-
-	if (large_vma_start < large_vma_limit) {
-		snprintf(name, sizeof(name), "gk20a_as_%d-%dKB", as_share->id,
-			 gmmu_page_sizes[gmmu_page_size_big] >> 10);
-		err = __nvgpu_buddy_allocator_init(
-			g,
-			&vm->vma[gmmu_page_size_big],
-			vm, name,
-			large_vma_start,
-			large_vma_limit - large_vma_start,
-			big_page_size,
-			GPU_BALLOC_MAX_ORDER,
-			GPU_ALLOC_GVA_SPACE);
-		if (err)
-			goto clean_up_small_allocator;
+	} else {
+		/*
+		 * Make these allocator pointers point to the kernel allocator
+		 * since we still use the legacy notion of page size to choose
+		 * the allocator.
+		 */
+		vm->vma[0] = &vm->kernel;
+		vm->vma[1] = &vm->kernel;
 	}
 
 	snprintf(name, sizeof(name), "gk20a_as_%dKB-sys",
 		 gmmu_page_sizes[gmmu_page_size_kernel] >> 10);
+	if (!gk20a_big_pages_possible(vm, kernel_vma_start,
+				      kernel_vma_limit - kernel_vma_start))
+		vm->big_pages = false;
+
 	/*
 	 * kernel reserved VMA is at the end of the aperture
 	 */
 	err = __nvgpu_buddy_allocator_init(
-		g,
-		&vm->vma[gmmu_page_size_kernel],
-		vm, name,
-		kernel_vma_start,
-		kernel_vma_limit - kernel_vma_start,
-		SZ_4K,
-		GPU_BALLOC_MAX_ORDER,
-		GPU_ALLOC_GVA_SPACE);
+			g,
+			vm->vma[gmmu_page_size_kernel],
+			vm, name,
+			kernel_vma_start,
+			kernel_vma_limit - kernel_vma_start,
+			SZ_4K,
+			GPU_BALLOC_MAX_ORDER,
+			GPU_ALLOC_GVA_SPACE);
 	if (err)
-		goto clean_up_big_allocator;
+		goto clean_up_user_allocator;
 
 	vm->mapped_buffers = RB_ROOT;
 
@@ -426,12 +412,9 @@ static int vgpu_vm_alloc_share(struct gk20a_as_share *as_share,
 
 	return 0;
 
-clean_up_big_allocator:
-	if (large_vma_start < large_vma_limit)
-		nvgpu_alloc_destroy(&vm->vma[gmmu_page_size_big]);
-clean_up_small_allocator:
-	if (small_vma_start < small_vma_limit)
-		nvgpu_alloc_destroy(&vm->vma[gmmu_page_size_small]);
+clean_up_user_allocator:
+	if (user_vma_start < user_vma_limit)
+		nvgpu_alloc_destroy(&vm->user);
 clean_up_share:
 	msg.cmd = TEGRA_VGPU_CMD_AS_FREE_SHARE;
 	msg.handle = vgpu_get_handle(g);
-- 
cgit v1.2.2