summary | refs | log | tree | commit | diff | stats
path: root/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/nvgpu/vgpu/mm_vgpu.c')
-rw-r--r--  drivers/gpu/nvgpu/vgpu/mm_vgpu.c  22
1 files changed, 12 insertions, 10 deletions
diff --git a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
index 2239fcbc..c6f42703 100644
--- a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
@@ -230,11 +230,11 @@ static void vgpu_vm_remove_support(struct vm_gk20a *vm)
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	WARN_ON(err || msg.ret);
 
-	gk20a_allocator_destroy(&vm->vma[gmmu_page_size_kernel]);
-	if (vm->vma[gmmu_page_size_small].init)
-		gk20a_allocator_destroy(&vm->vma[gmmu_page_size_small]);
-	if (vm->vma[gmmu_page_size_big].init)
-		gk20a_allocator_destroy(&vm->vma[gmmu_page_size_big]);
+	gk20a_alloc_destroy(&vm->vma[gmmu_page_size_kernel]);
+	if (gk20a_alloc_initialized(&vm->vma[gmmu_page_size_small]))
+		gk20a_alloc_destroy(&vm->vma[gmmu_page_size_small]);
+	if (gk20a_alloc_initialized(&vm->vma[gmmu_page_size_big]))
+		gk20a_alloc_destroy(&vm->vma[gmmu_page_size_big]);
 
 	mutex_unlock(&vm->update_gmmu_lock);
 
@@ -374,7 +374,8 @@ static int vgpu_vm_alloc_share(struct gk20a_as_share *as_share,
 	snprintf(name, sizeof(name), "gk20a_as_%d-%dKB", as_share->id,
 		 gmmu_page_sizes[gmmu_page_size_small] >> 10);
 
-	err = __gk20a_allocator_init(&vm->vma[gmmu_page_size_small],
+	err = __gk20a_buddy_allocator_init(
+			&vm->vma[gmmu_page_size_small],
 			vm, name,
 			small_vma_start,
 			small_vma_limit - small_vma_start,
@@ -388,7 +389,8 @@ static int vgpu_vm_alloc_share(struct gk20a_as_share *as_share,
 	if (large_vma_start < large_vma_limit) {
 		snprintf(name, sizeof(name), "gk20a_as_%d-%dKB", as_share->id,
 			 gmmu_page_sizes[gmmu_page_size_big] >> 10);
-		err = __gk20a_allocator_init(&vm->vma[gmmu_page_size_big],
+		err = __gk20a_buddy_allocator_init(
+				&vm->vma[gmmu_page_size_big],
 				vm, name,
 				large_vma_start,
 				large_vma_limit - large_vma_start,
@@ -404,7 +406,7 @@ static int vgpu_vm_alloc_share(struct gk20a_as_share *as_share,
 	/*
 	 * kernel reserved VMA is at the end of the aperture
 	 */
-	err = __gk20a_allocator_init(&vm->vma[gmmu_page_size_kernel],
+	err = __gk20a_buddy_allocator_init(&vm->vma[gmmu_page_size_kernel],
 			vm, name,
 			kernel_vma_start,
 			kernel_vma_limit - kernel_vma_start,
@@ -426,10 +428,10 @@ static int vgpu_vm_alloc_share(struct gk20a_as_share *as_share,
 
 clean_up_big_allocator:
 	if (large_vma_start < large_vma_limit)
-		gk20a_allocator_destroy(&vm->vma[gmmu_page_size_big]);
+		gk20a_alloc_destroy(&vm->vma[gmmu_page_size_big]);
 clean_up_small_allocator:
 	if (small_vma_start < small_vma_limit)
-		gk20a_allocator_destroy(&vm->vma[gmmu_page_size_small]);
+		gk20a_alloc_destroy(&vm->vma[gmmu_page_size_small]);
 clean_up_share:
 	msg.cmd = TEGRA_VGPU_CMD_AS_FREE_SHARE;
 	msg.handle = platform->virt_handle;