summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/nvgpu/vgpu/mm_vgpu.c')
-rw-r--r--  drivers/gpu/nvgpu/vgpu/mm_vgpu.c  |  20 +++++++++-----------
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
index 69f6fcaf..66c9344b 100644
--- a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
@@ -227,11 +227,11 @@ static void vgpu_vm_remove_support(struct vm_gk20a *vm)
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	WARN_ON(err || msg.ret);
 
-	gk20a_alloc_destroy(&vm->vma[gmmu_page_size_kernel]);
-	if (gk20a_alloc_initialized(&vm->vma[gmmu_page_size_small]))
-		gk20a_alloc_destroy(&vm->vma[gmmu_page_size_small]);
-	if (gk20a_alloc_initialized(&vm->vma[gmmu_page_size_big]))
-		gk20a_alloc_destroy(&vm->vma[gmmu_page_size_big]);
+	nvgpu_alloc_destroy(&vm->vma[gmmu_page_size_kernel]);
+	if (nvgpu_alloc_initialized(&vm->vma[gmmu_page_size_small]))
+		nvgpu_alloc_destroy(&vm->vma[gmmu_page_size_small]);
+	if (nvgpu_alloc_initialized(&vm->vma[gmmu_page_size_big]))
+		nvgpu_alloc_destroy(&vm->vma[gmmu_page_size_big]);
 
 	mutex_unlock(&vm->update_gmmu_lock);
 
@@ -370,7 +370,7 @@ static int vgpu_vm_alloc_share(struct gk20a_as_share *as_share,
 	snprintf(name, sizeof(name), "gk20a_as_%d-%dKB", as_share->id,
 		 gmmu_page_sizes[gmmu_page_size_small] >> 10);
 
-	err = __gk20a_buddy_allocator_init(
+	err = __nvgpu_buddy_allocator_init(
 			g,
 			&vm->vma[gmmu_page_size_small],
 			vm, name,
@@ -386,7 +386,7 @@ static int vgpu_vm_alloc_share(struct gk20a_as_share *as_share,
 	if (large_vma_start < large_vma_limit) {
 		snprintf(name, sizeof(name), "gk20a_as_%d-%dKB", as_share->id,
 			 gmmu_page_sizes[gmmu_page_size_big] >> 10);
-		err = __gk20a_buddy_allocator_init(
+		err = __nvgpu_buddy_allocator_init(
 				g,
 				&vm->vma[gmmu_page_size_big],
 				vm, name,
@@ -404,7 +404,7 @@ static int vgpu_vm_alloc_share(struct gk20a_as_share *as_share,
 	/*
 	 * kernel reserved VMA is at the end of the aperture
 	 */
-	err = __gk20a_buddy_allocator_init(
+	err = __nvgpu_buddy_allocator_init(
 			g,
 			&vm->vma[gmmu_page_size_kernel],
 			vm, name,
@@ -428,10 +428,10 @@ static int vgpu_vm_alloc_share(struct gk20a_as_share *as_share,
 
 clean_up_big_allocator:
 	if (large_vma_start < large_vma_limit)
-		gk20a_alloc_destroy(&vm->vma[gmmu_page_size_big]);
+		nvgpu_alloc_destroy(&vm->vma[gmmu_page_size_big]);
 clean_up_small_allocator:
 	if (small_vma_start < small_vma_limit)
-		gk20a_alloc_destroy(&vm->vma[gmmu_page_size_small]);
+		nvgpu_alloc_destroy(&vm->vma[gmmu_page_size_small]);
 clean_up_share:
 	msg.cmd = TEGRA_VGPU_CMD_AS_FREE_SHARE;
 	msg.handle = vgpu_get_handle(g);