Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/mm_gk20a.c')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/mm_gk20a.c	183
1 file changed, 40 insertions, 143 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index 201c2090..72a3ee13 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -55,6 +55,12 @@
 #include <nvgpu/hw/gk20a/hw_flush_gk20a.h>
 #include <nvgpu/hw/gk20a/hw_ltc_gk20a.h>
 
+/*
+ * Necessary while transitioning to less coupled code. Will be removed once
+ * all the common APIs no longers have Linux stuff in them.
+ */
+#include "common/linux/vm_priv.h"
+
 #if defined(CONFIG_GK20A_VIDMEM)
 static void gk20a_vidmem_clear_mem_worker(struct work_struct *work);
 #endif
@@ -177,8 +183,6 @@ struct gk20a_vidmem_buf {
 	void (*dmabuf_priv_delete)(void *);
 };
 
-static void gk20a_vm_remove_support_nofree(struct vm_gk20a *vm);
-
 static int gk20a_comptaglines_alloc(struct gk20a_comptag_allocator *allocator,
 		u32 *offset, u32 len)
 {
@@ -460,16 +464,6 @@ static int gk20a_init_mm_reset_enable_hw(struct gk20a *g)
 	return 0;
 }
 
-void gk20a_remove_vm(struct vm_gk20a *vm, struct nvgpu_mem *inst_block)
-{
-	struct gk20a *g = vm->mm->g;
-
-	gk20a_dbg_fn("");
-
-	gk20a_free_inst_block(g, inst_block);
-	gk20a_vm_remove_support_nofree(vm);
-}
-
 static void gk20a_vidmem_destroy(struct gk20a *g)
 {
 #if defined(CONFIG_GK20A_VIDMEM)
@@ -487,7 +481,7 @@ static void gk20a_remove_mm_ce_support(struct mm_gk20a *mm)
 
 	mm->vidmem.ce_ctx_id = (u32)~0;
 
-	gk20a_vm_remove_support_nofree(&mm->ce.vm);
+	nvgpu_vm_remove_support_nofree(&mm->ce.vm);
 
 }
 
@@ -503,7 +497,7 @@ static void gk20a_remove_mm_support(struct mm_gk20a *mm)
 
 	gk20a_remove_vm(&mm->pmu.vm, &mm->pmu.inst_block);
 	gk20a_free_inst_block(gk20a_from_mm(mm), &mm->hwpm.inst_block);
-	gk20a_vm_remove_support_nofree(&mm->cde.vm);
+	nvgpu_vm_remove_support_nofree(&mm->cde.vm);
 
 	gk20a_semaphore_sea_destroy(g);
 	gk20a_vidmem_destroy(g);
@@ -1102,7 +1096,7 @@ static struct vm_reserved_va_node *addr_to_reservation(struct vm_gk20a *vm,
 	return NULL;
 }
 
-int gk20a_vm_get_buffers(struct vm_gk20a *vm,
+int nvgpu_vm_get_buffers(struct vm_gk20a *vm,
 			 struct mapped_buffer_node ***mapped_buffers,
 			 int *num_buffers)
 {
@@ -1151,37 +1145,10 @@ static void gk20a_vm_unmap_locked_kref(struct kref *ref)
 {
 	struct mapped_buffer_node *mapped_buffer =
 		container_of(ref, struct mapped_buffer_node, ref);
-	gk20a_vm_unmap_locked(mapped_buffer, mapped_buffer->vm->kref_put_batch);
-}
-
-void gk20a_vm_mapping_batch_start(struct vm_gk20a_mapping_batch *mapping_batch)
-{
-	memset(mapping_batch, 0, sizeof(*mapping_batch));
-	mapping_batch->gpu_l2_flushed = false;
-	mapping_batch->need_tlb_invalidate = false;
+	nvgpu_vm_unmap_locked(mapped_buffer, mapped_buffer->vm->kref_put_batch);
 }
 
-void gk20a_vm_mapping_batch_finish_locked(
-	struct vm_gk20a *vm, struct vm_gk20a_mapping_batch *mapping_batch)
-{
-	/* hanging kref_put batch pointer? */
-	WARN_ON(vm->kref_put_batch == mapping_batch);
-
-	if (mapping_batch->need_tlb_invalidate) {
-		struct gk20a *g = gk20a_from_vm(vm);
-		g->ops.fb.tlb_invalidate(g, &vm->pdb.mem);
-	}
-}
-
-void gk20a_vm_mapping_batch_finish(struct vm_gk20a *vm,
-	struct vm_gk20a_mapping_batch *mapping_batch)
-{
-	nvgpu_mutex_acquire(&vm->update_gmmu_lock);
-	gk20a_vm_mapping_batch_finish_locked(vm, mapping_batch);
-	nvgpu_mutex_release(&vm->update_gmmu_lock);
-}
-
-void gk20a_vm_put_buffers(struct vm_gk20a *vm,
+void nvgpu_vm_put_buffers(struct vm_gk20a *vm,
 			  struct mapped_buffer_node **mapped_buffers,
 			  int num_buffers)
 {
@@ -1192,7 +1159,7 @@ void gk20a_vm_put_buffers(struct vm_gk20a *vm,
 		return;
 
 	nvgpu_mutex_acquire(&vm->update_gmmu_lock);
-	gk20a_vm_mapping_batch_start(&batch);
+	nvgpu_vm_mapping_batch_start(&batch);
 	vm->kref_put_batch = &batch;
 
 	for (i = 0; i < num_buffers; ++i)
@@ -1200,13 +1167,13 @@ void gk20a_vm_put_buffers(struct vm_gk20a *vm,
 			 gk20a_vm_unmap_locked_kref);
 
 	vm->kref_put_batch = NULL;
-	gk20a_vm_mapping_batch_finish_locked(vm, &batch);
+	nvgpu_vm_mapping_batch_finish_locked(vm, &batch);
 	nvgpu_mutex_release(&vm->update_gmmu_lock);
 
 	nvgpu_big_free(vm->mm->g, mapped_buffers);
 }
 
-static void gk20a_vm_unmap_user(struct vm_gk20a *vm, u64 offset,
+static void nvgpu_vm_unmap_user(struct vm_gk20a *vm, u64 offset,
 			struct vm_gk20a_mapping_batch *batch)
 {
 	struct gk20a *g = vm->mm->g;
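Aside for readers following the rename: the two hunks above show the batching pattern that nvgpu_vm_put_buffers() keeps using after the move. The sketch below is illustrative only and is not part of this patch; it assumes a non-locked nvgpu_vm_mapping_batch_finish() counterpart to the _locked variant used above, and it drives the batch through nvgpu_vm_unmap_buffer(), which is renamed later in this diff.

/*
 * Illustrative sketch only (not from this patch): unmap several buffers
 * under one batch so the deferred TLB invalidate/L2 flush happens once,
 * mirroring the flow inside nvgpu_vm_put_buffers(). Assumes
 * nvgpu_vm_mapping_batch_finish() exists as the non-locked counterpart
 * of nvgpu_vm_mapping_batch_finish_locked().
 */
static int unmap_offsets_batched(struct vm_gk20a *vm, u64 *offsets, int n)
{
	struct vm_gk20a_mapping_batch batch;
	int i, err = 0;

	nvgpu_vm_mapping_batch_start(&batch);

	for (i = 0; i < n; i++) {
		err = nvgpu_vm_unmap_buffer(vm, offsets[i], &batch);
		if (err)
			break;
	}

	/* One TLB invalidate for the whole batch, if any unmap needed it. */
	nvgpu_vm_mapping_batch_finish(vm, &batch);

	return err;
}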
@@ -1650,7 +1617,7 @@ static enum nvgpu_aperture gk20a_dmabuf_aperture(struct gk20a *g,
 	}
 }
 
-static u64 gk20a_vm_map_duplicate_locked(struct vm_gk20a *vm,
+static u64 nvgpu_vm_map_duplicate_locked(struct vm_gk20a *vm,
 					 struct dma_buf *dmabuf,
 					 u64 offset_align,
 					 u32 flags,
@@ -1997,7 +1964,7 @@ static u64 gk20a_mm_get_align(struct gk20a *g, struct scatterlist *sgl,
 	return align;
 }
 
-u64 gk20a_vm_map(struct vm_gk20a *vm,
+u64 nvgpu_vm_map(struct vm_gk20a *vm,
 		 struct dma_buf *dmabuf,
 		 u64 offset_align,
 		 u32 flags /*NVGPU_AS_MAP_BUFFER_FLAGS_*/,
@@ -2038,7 +2005,7 @@ u64 gk20a_vm_map(struct vm_gk20a *vm,
 
 	/* check if this buffer is already mapped */
 	if (!vm->userspace_managed) {
-		map_offset = gk20a_vm_map_duplicate_locked(
+		map_offset = nvgpu_vm_map_duplicate_locked(
 			vm, dmabuf, offset_align,
 			flags, kind, sgt,
 			user_mapped, rw_flag);
@@ -2256,7 +2223,7 @@ clean_up:
 	return 0;
 }
 
-int gk20a_vm_get_compbits_info(struct vm_gk20a *vm,
+int nvgpu_vm_get_compbits_info(struct vm_gk20a *vm,
 			       u64 mapping_gva,
 			       u64 *compbits_win_size,
 			       u32 *compbits_win_ctagline,
@@ -2298,7 +2265,7 @@ int gk20a_vm_get_compbits_info(struct vm_gk20a *vm,
 }
 
 
-int gk20a_vm_map_compbits(struct vm_gk20a *vm,
+int nvgpu_vm_map_compbits(struct vm_gk20a *vm,
 			  u64 mapping_gva,
 			  u64 *compbits_win_gva,
 			  u64 *mapping_iova,
@@ -3059,7 +3026,7 @@ static int update_gmmu_ptes_locked(struct vm_gk20a *vm,
 }
 
 /* NOTE! mapped_buffers lock must be held */
-void gk20a_vm_unmap_locked(struct mapped_buffer_node *mapped_buffer,
+void nvgpu_vm_unmap_locked(struct mapped_buffer_node *mapped_buffer,
 			   struct vm_gk20a_mapping_batch *batch)
 {
 	struct vm_gk20a *vm = mapped_buffer->vm;
@@ -3115,7 +3082,7 @@ void gk20a_vm_unmap_locked(struct mapped_buffer_node *mapped_buffer,
 	return;
 }
 
-void gk20a_vm_unmap(struct vm_gk20a *vm, u64 offset)
+void nvgpu_vm_unmap(struct vm_gk20a *vm, u64 offset)
 {
 	struct gk20a *g = vm->mm->g;
 	struct mapped_buffer_node *mapped_buffer;
@@ -3148,76 +3115,6 @@ static void gk20a_vm_free_entries(struct vm_gk20a *vm,
 	parent->entries = NULL;
 }
 
-static void gk20a_vm_remove_support_nofree(struct vm_gk20a *vm)
-{
-	struct mapped_buffer_node *mapped_buffer;
-	struct vm_reserved_va_node *va_node, *va_node_tmp;
-	struct nvgpu_rbtree_node *node = NULL;
-	struct gk20a *g = vm->mm->g;
-
-	gk20a_dbg_fn("");
-
-	/*
-	 * Do this outside of the update_gmmu_lock since unmapping the semaphore
-	 * pool involves unmapping a GMMU mapping which means aquiring the
-	 * update_gmmu_lock.
-	 */
-	if (!(g->gpu_characteristics.flags & NVGPU_GPU_FLAGS_HAS_SYNCPOINTS)) {
-		if (vm->sema_pool) {
-			nvgpu_semaphore_pool_unmap(vm->sema_pool, vm);
-			nvgpu_semaphore_pool_put(vm->sema_pool);
-		}
-	}
-
-	nvgpu_mutex_acquire(&vm->update_gmmu_lock);
-
-	/* TBD: add a flag here for the unmap code to recognize teardown
-	 * and short-circuit any otherwise expensive operations. */
-
-	nvgpu_rbtree_enum_start(0, &node, vm->mapped_buffers);
-	while (node) {
-		mapped_buffer = mapped_buffer_from_rbtree_node(node);
-		gk20a_vm_unmap_locked(mapped_buffer, NULL);
-		nvgpu_rbtree_enum_start(0, &node, vm->mapped_buffers);
-	}
-
-	/* destroy remaining reserved memory areas */
-	nvgpu_list_for_each_entry_safe(va_node, va_node_tmp,
-		&vm->reserved_va_list,
-		vm_reserved_va_node, reserved_va_list) {
-		nvgpu_list_del(&va_node->reserved_va_list);
-		nvgpu_kfree(vm->mm->g, va_node);
-	}
-
-	gk20a_deinit_vm(vm);
-
-	nvgpu_mutex_release(&vm->update_gmmu_lock);
-}
-
-void gk20a_vm_remove_support(struct vm_gk20a *vm)
-{
-	gk20a_vm_remove_support_nofree(vm);
-	/* vm is not used anymore. release it. */
-	nvgpu_kfree(vm->mm->g, vm);
-}
-
-static void gk20a_vm_remove_support_kref(struct kref *ref)
-{
-	struct vm_gk20a *vm = container_of(ref, struct vm_gk20a, ref);
-	struct gk20a *g = gk20a_from_vm(vm);
-	g->ops.mm.vm_remove(vm);
-}
-
-void gk20a_vm_get(struct vm_gk20a *vm)
-{
-	kref_get(&vm->ref);
-}
-
-void gk20a_vm_put(struct vm_gk20a *vm)
-{
-	kref_put(&vm->ref, gk20a_vm_remove_support_kref);
-}
-
 const struct gk20a_mmu_level gk20a_mm_levels_64k[] = {
 	{.hi_bit = {NV_GMMU_VA_RANGE-1, NV_GMMU_VA_RANGE-1},
 	 .lo_bit = {26, 26},
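The block removed above is where the VM reference counting used to live in this file; the remaining hunks switch callers to the nvgpu_vm_get()/nvgpu_vm_put() names. A minimal sketch of the resulting lifetime pattern, not part of this patch, assuming the common-code helpers keep the kref semantics of the removed gk20a_* versions:

/*
 * Illustrative sketch only (not from this patch). Assumes nvgpu_vm_get()
 * and nvgpu_vm_put() keep the kref behaviour of the removed
 * gk20a_vm_get()/gk20a_vm_put(): the last put calls g->ops.mm.vm_remove(),
 * which this patch now points at nvgpu_vm_remove_support().
 */
static void hold_vm_for_channel(struct vm_gk20a *vm, struct channel_gk20a *ch)
{
	nvgpu_vm_get(vm);	/* the channel takes its own VM reference */
	ch->vm = vm;
}

static void drop_vm_for_channel(struct channel_gk20a *ch)
{
	struct vm_gk20a *vm = ch->vm;

	ch->vm = NULL;
	nvgpu_vm_put(vm);	/* the final reference tears the VM down */
}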
@@ -3284,7 +3181,7 @@ static int gk20a_init_sema_pool(struct vm_gk20a *vm)
 					SZ_4K);
 	if (!sema_sea->gpu_va) {
 		nvgpu_free(&vm->kernel, sema_sea->gpu_va);
-		gk20a_vm_put(vm);
+		nvgpu_vm_put(vm);
 		return -ENOMEM;
 	}
 
@@ -3408,7 +3305,7 @@ static int init_vm_page_tables(struct vm_gk20a *vm)
 }
 
 /**
- * gk20a_init_vm() - Initialize an address space.
+ * nvgpu_init_vm() - Initialize an address space.
  *
  * @mm - Parent MM.
  * @vm - The VM to init.
@@ -3443,7 +3340,7 @@ static int init_vm_page_tables(struct vm_gk20a *vm)
 * such cases the @kernel_reserved and @low_hole should sum to exactly
 * @aperture_size.
 */
-int gk20a_init_vm(struct mm_gk20a *mm,
+int nvgpu_init_vm(struct mm_gk20a *mm,
 		  struct vm_gk20a *vm,
 		  u32 big_page_size,
 		  u64 low_hole,
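The kernel-doc above says that for an address space with no user region the @kernel_reserved and @low_hole regions must sum to exactly @aperture_size. A tiny illustrative check of that documented invariant, not code from this patch (kernel integer types assumed):

/*
 * Illustrative sketch only (not from this patch): the layout rule the
 * nvgpu_init_vm() documentation states for a VM with no user region.
 */
static bool kernel_vm_layout_is_valid(u64 low_hole, u64 kernel_reserved,
				      u64 aperture_size)
{
	return low_hole + kernel_reserved == aperture_size;
}

The BAR1 setup later in this diff follows that rule: a SZ_4K low hole plus mm->bar1.aperture_size - SZ_4K of kernel-reserved space spans the whole aperture.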
@@ -3683,7 +3580,7 @@ int gk20a_vm_alloc_share(struct gk20a_as_share *as_share, u32 big_page_size,
 
 	snprintf(name, sizeof(name), "as_%d", as_share->id);
 
-	err = gk20a_init_vm(mm, vm, big_page_size,
+	err = nvgpu_init_vm(mm, vm, big_page_size,
 			    big_page_size << 10,
 			    mm->channel.kernel_size,
 			    mm->channel.user_size + mm->channel.kernel_size,
@@ -3701,7 +3598,7 @@ int gk20a_vm_release_share(struct gk20a_as_share *as_share)
 	vm->as_share = NULL;
 	as_share->vm = NULL;
 
-	gk20a_vm_put(vm);
+	nvgpu_vm_put(vm);
 
 	return 0;
 }
@@ -3864,7 +3761,7 @@ int __gk20a_vm_bind_channel(struct vm_gk20a *vm, struct channel_gk20a *ch)
 
 	gk20a_dbg_fn("");
 
-	gk20a_vm_get(vm);
+	nvgpu_vm_get(vm);
 	ch->vm = vm;
 	err = channel_gk20a_commit_va(ch);
 	if (err)
@@ -3960,7 +3857,7 @@ out:
 
 }
 
-int gk20a_vm_map_buffer(struct vm_gk20a *vm,
+int nvgpu_vm_map_buffer(struct vm_gk20a *vm,
 			int dmabuf_fd,
 			u64 *offset_align,
 			u32 flags, /*NVGPU_AS_MAP_BUFFER_FLAGS_*/
@@ -3989,7 +3886,7 @@ int gk20a_vm_map_buffer(struct vm_gk20a *vm,
 		return err;
 	}
 
-	ret_va = gk20a_vm_map(vm, dmabuf, *offset_align,
+	ret_va = nvgpu_vm_map(vm, dmabuf, *offset_align,
 			      flags, kind, NULL, true,
 			      gk20a_mem_flag_none,
 			      buffer_offset,
@@ -4005,16 +3902,16 @@ int gk20a_vm_map_buffer(struct vm_gk20a *vm,
 	return err;
 }
 
-int gk20a_vm_unmap_buffer(struct vm_gk20a *vm, u64 offset,
+int nvgpu_vm_unmap_buffer(struct vm_gk20a *vm, u64 offset,
 			  struct vm_gk20a_mapping_batch *batch)
 {
 	gk20a_dbg_fn("");
 
-	gk20a_vm_unmap_user(vm, offset, batch);
+	nvgpu_vm_unmap_user(vm, offset, batch);
 	return 0;
 }
 
-void gk20a_deinit_vm(struct vm_gk20a *vm)
+void nvgpu_deinit_vm(struct vm_gk20a *vm)
 {
 	if (nvgpu_alloc_initialized(&vm->kernel))
 		nvgpu_alloc_destroy(&vm->kernel);
@@ -4069,7 +3966,7 @@ static int gk20a_init_bar1_vm(struct mm_gk20a *mm)
 
 	mm->bar1.aperture_size = bar1_aperture_size_mb_gk20a() << 20;
 	gk20a_dbg_info("bar1 vm size = 0x%x", mm->bar1.aperture_size);
-	gk20a_init_vm(mm, vm,
+	nvgpu_init_vm(mm, vm,
 		      big_page_size,
 		      SZ_4K,				/* Low hole */
 		      mm->bar1.aperture_size - SZ_4K,	/* Kernel reserved. */
@@ -4085,7 +3982,7 @@ static int gk20a_init_bar1_vm(struct mm_gk20a *mm)
 	return 0;
 
 clean_up_va:
-	gk20a_deinit_vm(vm);
+	nvgpu_deinit_vm(vm);
 	return err;
 }
 
@@ -4108,7 +4005,7 @@ static int gk20a_init_system_vm(struct mm_gk20a *mm)
 	mm->pmu.aperture_size = GK20A_PMU_VA_SIZE;
 	gk20a_dbg_info("pmu vm size = 0x%x", mm->pmu.aperture_size);
 
-	gk20a_init_vm(mm, vm, big_page_size,
+	nvgpu_init_vm(mm, vm, big_page_size,
 		      low_hole,
 		      aperture_size - low_hole,
 		      aperture_size,
@@ -4124,7 +4021,7 @@ static int gk20a_init_system_vm(struct mm_gk20a *mm)
 	return 0;
 
 clean_up_va:
-	gk20a_deinit_vm(vm);
+	nvgpu_deinit_vm(vm);
 	return err;
 }
 
@@ -4149,7 +4046,7 @@ static int gk20a_init_cde_vm(struct mm_gk20a *mm)
 	struct gk20a *g = gk20a_from_mm(mm);
 	u32 big_page_size = gk20a_get_platform(g->dev)->default_big_page_size;
 
-	return gk20a_init_vm(mm, vm, big_page_size,
+	return nvgpu_init_vm(mm, vm, big_page_size,
 			     big_page_size << 10,
 			     NV_MM_DEFAULT_KERNEL_SIZE,
 			     NV_MM_DEFAULT_KERNEL_SIZE + NV_MM_DEFAULT_USER_SIZE,
@@ -4162,7 +4059,7 @@ static int gk20a_init_ce_vm(struct mm_gk20a *mm)
 	struct gk20a *g = gk20a_from_mm(mm);
 	u32 big_page_size = gk20a_get_platform(g->dev)->default_big_page_size;
 
-	return gk20a_init_vm(mm, vm, big_page_size,
+	return nvgpu_init_vm(mm, vm, big_page_size,
 			     big_page_size << 10,
 			     NV_MM_DEFAULT_KERNEL_SIZE,
 			     NV_MM_DEFAULT_KERNEL_SIZE + NV_MM_DEFAULT_USER_SIZE,
@@ -4399,7 +4296,7 @@ hw_was_off:
 	gk20a_idle_nosuspend(g->dev);
 }
 
-int gk20a_vm_find_buffer(struct vm_gk20a *vm, u64 gpu_va,
+int nvgpu_vm_find_buffer(struct vm_gk20a *vm, u64 gpu_va,
 			 struct dma_buf **dmabuf,
 			 u64 *offset)
 {
@@ -4503,7 +4400,7 @@ void gk20a_init_mm(struct gpu_ops *gops)
 {
 	gops->mm.gmmu_map = gk20a_locked_gmmu_map;
 	gops->mm.gmmu_unmap = gk20a_locked_gmmu_unmap;
-	gops->mm.vm_remove = gk20a_vm_remove_support;
+	gops->mm.vm_remove = nvgpu_vm_remove_support;
 	gops->mm.vm_alloc_share = gk20a_vm_alloc_share;
 	gops->mm.vm_bind_channel = gk20a_vm_bind_channel;
 	gops->mm.fb_flush = gk20a_mm_fb_flush;