From 6df3992b60959d32c7113cb77e131a2547174f3a Mon Sep 17 00:00:00 2001
From: Alex Waterman
Date: Tue, 20 Dec 2016 13:55:48 -0800
Subject: gpu: nvgpu: Move allocators to common/mm/

Move the GPU allocators to common/mm/ since the allocators are common
code across all GPUs. Also rename the allocator code to move away from
gk20a_ prefixed structs and functions.

This caused one issue with the nvgpu_alloc() and nvgpu_free() functions.
There was a function for allocating either with kmalloc() or vmalloc()
depending on the size of the allocation. Those have now been renamed to
nvgpu_kalloc() and nvgpu_kfree().

Bug 1799159

Change-Id: Iddda92c013612bcb209847084ec85b8953002fa5
Signed-off-by: Alex Waterman
Reviewed-on: http://git-master/r/1274400
Reviewed-by: mobile promotions
Tested-by: mobile promotions
---
 drivers/gpu/nvgpu/vgpu/mm_vgpu.c | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

(limited to 'drivers/gpu/nvgpu/vgpu')

diff --git a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
index 69f6fcaf..66c9344b 100644
--- a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
@@ -227,11 +227,11 @@ static void vgpu_vm_remove_support(struct vm_gk20a *vm)
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	WARN_ON(err || msg.ret);
 
-	gk20a_alloc_destroy(&vm->vma[gmmu_page_size_kernel]);
-	if (gk20a_alloc_initialized(&vm->vma[gmmu_page_size_small]))
-		gk20a_alloc_destroy(&vm->vma[gmmu_page_size_small]);
-	if (gk20a_alloc_initialized(&vm->vma[gmmu_page_size_big]))
-		gk20a_alloc_destroy(&vm->vma[gmmu_page_size_big]);
+	nvgpu_alloc_destroy(&vm->vma[gmmu_page_size_kernel]);
+	if (nvgpu_alloc_initialized(&vm->vma[gmmu_page_size_small]))
+		nvgpu_alloc_destroy(&vm->vma[gmmu_page_size_small]);
+	if (nvgpu_alloc_initialized(&vm->vma[gmmu_page_size_big]))
+		nvgpu_alloc_destroy(&vm->vma[gmmu_page_size_big]);
 
 	mutex_unlock(&vm->update_gmmu_lock);
 
@@ -370,7 +370,7 @@ static int vgpu_vm_alloc_share(struct gk20a_as_share *as_share,
 	snprintf(name, sizeof(name), "gk20a_as_%d-%dKB", as_share->id,
 		 gmmu_page_sizes[gmmu_page_size_small] >> 10);
 
-	err = __gk20a_buddy_allocator_init(
+	err = __nvgpu_buddy_allocator_init(
 		g,
 		&vm->vma[gmmu_page_size_small],
 		vm, name,
@@ -386,7 +386,7 @@ static int vgpu_vm_alloc_share(struct gk20a_as_share *as_share,
 	if (large_vma_start < large_vma_limit) {
 		snprintf(name, sizeof(name), "gk20a_as_%d-%dKB",
 			 as_share->id,
			 gmmu_page_sizes[gmmu_page_size_big] >> 10);
-		err = __gk20a_buddy_allocator_init(
+		err = __nvgpu_buddy_allocator_init(
 			g,
 			&vm->vma[gmmu_page_size_big],
 			vm, name,
@@ -404,7 +404,7 @@ static int vgpu_vm_alloc_share(struct gk20a_as_share *as_share,
 	/*
 	 * kernel reserved VMA is at the end of the aperture
 	 */
-	err = __gk20a_buddy_allocator_init(
+	err = __nvgpu_buddy_allocator_init(
 		g,
 		&vm->vma[gmmu_page_size_kernel],
 		vm, name,
@@ -428,10 +428,10 @@ static int vgpu_vm_alloc_share(struct gk20a_as_share *as_share,
 
 clean_up_big_allocator:
 	if (large_vma_start < large_vma_limit)
-		gk20a_alloc_destroy(&vm->vma[gmmu_page_size_big]);
+		nvgpu_alloc_destroy(&vm->vma[gmmu_page_size_big]);
 clean_up_small_allocator:
 	if (small_vma_start < small_vma_limit)
-		gk20a_alloc_destroy(&vm->vma[gmmu_page_size_small]);
+		nvgpu_alloc_destroy(&vm->vma[gmmu_page_size_small]);
 clean_up_share:
 	msg.cmd = TEGRA_VGPU_CMD_AS_FREE_SHARE;
 	msg.handle = vgpu_get_handle(g);
-- 
cgit v1.2.2
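
Note: the commit message describes nvgpu_kalloc()/nvgpu_kfree() as helpers that pick
between kmalloc() and vmalloc() based on the allocation size. A minimal sketch of that
pattern is shown below; the example_* names, the PAGE_SIZE cutoff, and the function
bodies are illustrative assumptions, not the actual nvgpu implementation.

	#include <linux/slab.h>
	#include <linux/vmalloc.h>
	#include <linux/mm.h>

	/*
	 * Illustrative sketch only: fall back to vmalloc() for large requests,
	 * since kmalloc() requires physically contiguous memory and may fail
	 * for big sizes. The PAGE_SIZE threshold is an assumption, not the
	 * driver's real cutoff.
	 */
	static inline void *example_kalloc(size_t size)
	{
		if (size > PAGE_SIZE)
			return vmalloc(size);
		return kmalloc(size, GFP_KERNEL);
	}

	static inline void example_kfree(void *addr)
	{
		/* is_vmalloc_addr() identifies which allocator produced the pointer. */
		if (is_vmalloc_addr(addr))
			vfree(addr);
		else
			kfree(addr);
	}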