author    Alex Waterman <alexw@nvidia.com>  2016-06-24 17:12:24 -0400
committer Alex Waterman <alexw@nvidia.com>  2016-07-19 14:21:46 -0400
commit    b6569319c772d84087a0a1a6d7146bdcae8e9aab (patch)
tree      16e7bae422279925301d9116b1e7f4d8aa656483 /drivers/gpu/nvgpu/vgpu
parent    f4b77e465648e87b19a7df4bb2a121ac8ac1b851 (diff)
gpu: nvgpu: Support multiple types of allocators
Support multiple types of allocation backends. Currently there is only
one allocator implementation available: a buddy allocator.

Buddy allocators have certain limitations, though. For one, the
allocator requires metadata to be allocated from the kernel's system
memory. This means a given buddy allocation can potentially sleep on a
kmalloc() call. This patch introduces the backend abstraction so that
a new backend can be created which avoids calling any dynamic system
memory management routines.

Bug 1781897

Change-Id: I98d6c8402c049942f13fee69c6901a166f177f65
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/1172115
GVS: Gerrit_Virtual_Submit
Reviewed-by: Konsta Holtta <kholtta@nvidia.com>
Reviewed-by: Yu-Huan Hsu <yhsu@nvidia.com>
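To make the "multiple backends" idea in the message concrete, the split is easiest to picture as a generic allocator struct dispatching through a per-backend ops table. The following is a minimal sketch of that shape only; the struct and function names are illustrative assumptions for this page, not the exact nvgpu API (the real interface lives in the gk20a allocator headers and differs in detail).

#include <linux/types.h>

/*
 * Illustrative sketch: a generic allocator that dispatches through a
 * per-backend ops table. A buddy backend (which kmalloc()s its
 * metadata and may therefore sleep) and a future non-sleeping backend
 * can then sit behind the same interface. Names here are assumptions.
 */
struct gk20a_allocator;

struct gk20a_allocator_ops {
	u64  (*alloc)(struct gk20a_allocator *a, u64 len);
	void (*free)(struct gk20a_allocator *a, u64 addr);
	void (*fini)(struct gk20a_allocator *a);
};

struct gk20a_allocator {
	const struct gk20a_allocator_ops *ops;
	void *priv;	/* backend-private state, e.g. the buddy tree */
};

static inline u64 gk20a_alloc(struct gk20a_allocator *a, u64 len)
{
	/* Dispatch to whichever backend was installed at init time. */
	return a->ops->alloc(a, len);
}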
Diffstat (limited to 'drivers/gpu/nvgpu/vgpu')
-rw-r--r--  drivers/gpu/nvgpu/vgpu/mm_vgpu.c | 22 ++++++++++++----------
1 file changed, 12 insertions(+), 10 deletions(-)
diff --git a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
index 2239fcbc..c6f42703 100644
--- a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
@@ -230,11 +230,11 @@ static void vgpu_vm_remove_support(struct vm_gk20a *vm)
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	WARN_ON(err || msg.ret);
 
-	gk20a_allocator_destroy(&vm->vma[gmmu_page_size_kernel]);
-	if (vm->vma[gmmu_page_size_small].init)
-		gk20a_allocator_destroy(&vm->vma[gmmu_page_size_small]);
-	if (vm->vma[gmmu_page_size_big].init)
-		gk20a_allocator_destroy(&vm->vma[gmmu_page_size_big]);
+	gk20a_alloc_destroy(&vm->vma[gmmu_page_size_kernel]);
+	if (gk20a_alloc_initialized(&vm->vma[gmmu_page_size_small]))
+		gk20a_alloc_destroy(&vm->vma[gmmu_page_size_small]);
+	if (gk20a_alloc_initialized(&vm->vma[gmmu_page_size_big]))
+		gk20a_alloc_destroy(&vm->vma[gmmu_page_size_big]);
 
 	mutex_unlock(&vm->update_gmmu_lock);
 
@@ -374,7 +374,8 @@ static int vgpu_vm_alloc_share(struct gk20a_as_share *as_share,
 	snprintf(name, sizeof(name), "gk20a_as_%d-%dKB", as_share->id,
 		 gmmu_page_sizes[gmmu_page_size_small] >> 10);
 
-	err = __gk20a_allocator_init(&vm->vma[gmmu_page_size_small],
+	err = __gk20a_buddy_allocator_init(
+				     &vm->vma[gmmu_page_size_small],
 				     vm, name,
 				     small_vma_start,
 				     small_vma_limit - small_vma_start,
@@ -388,7 +389,8 @@ static int vgpu_vm_alloc_share(struct gk20a_as_share *as_share,
 	if (large_vma_start < large_vma_limit) {
 		snprintf(name, sizeof(name), "gk20a_as_%d-%dKB", as_share->id,
 			 gmmu_page_sizes[gmmu_page_size_big] >> 10);
-		err = __gk20a_allocator_init(&vm->vma[gmmu_page_size_big],
+		err = __gk20a_buddy_allocator_init(
+					     &vm->vma[gmmu_page_size_big],
 					     vm, name,
 					     large_vma_start,
 					     large_vma_limit - large_vma_start,
@@ -404,7 +406,7 @@ static int vgpu_vm_alloc_share(struct gk20a_as_share *as_share,
 	/*
 	 * kernel reserved VMA is at the end of the aperture
 	 */
-	err = __gk20a_allocator_init(&vm->vma[gmmu_page_size_kernel],
+	err = __gk20a_buddy_allocator_init(&vm->vma[gmmu_page_size_kernel],
 				     vm, name,
 				     kernel_vma_start,
 				     kernel_vma_limit - kernel_vma_start,
@@ -426,10 +428,10 @@ static int vgpu_vm_alloc_share(struct gk20a_as_share *as_share,
 
 clean_up_big_allocator:
 	if (large_vma_start < large_vma_limit)
-		gk20a_allocator_destroy(&vm->vma[gmmu_page_size_big]);
+		gk20a_alloc_destroy(&vm->vma[gmmu_page_size_big]);
 clean_up_small_allocator:
 	if (small_vma_start < small_vma_limit)
-		gk20a_allocator_destroy(&vm->vma[gmmu_page_size_small]);
+		gk20a_alloc_destroy(&vm->vma[gmmu_page_size_small]);
 clean_up_share:
 	msg.cmd = TEGRA_VGPU_CMD_AS_FREE_SHARE;
 	msg.handle = platform->virt_handle;
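Taken together, the hunks above move vgpu callers off buddy-specific entry points (__gk20a_allocator_init, gk20a_allocator_destroy, the raw .init flag) onto generic ones (__gk20a_buddy_allocator_init for construction, gk20a_alloc_destroy and gk20a_alloc_initialized for teardown). Under the same assumed ops-table layout as the earlier sketch, those generic lifecycle helpers could look roughly like this; again, a hedged illustration, not the actual nvgpu definitions.

/*
 * Rough lifecycle sketch matching the renames in the diff, reusing
 * the hypothetical ops-table layout from the earlier sketch; the
 * real nvgpu helpers may differ.
 */
static inline bool gk20a_alloc_initialized(struct gk20a_allocator *a)
{
	/* An installed ops table marks the allocator as live. */
	return a->ops != NULL;
}

static inline void gk20a_alloc_destroy(struct gk20a_allocator *a)
{
	if (gk20a_alloc_initialized(a))
		a->ops->fini(a);	/* backend tears down its own metadata */
	a->ops = NULL;
}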