author     Alex Waterman <alexw@nvidia.com>                     2017-04-26 17:27:02 -0400
committer  mobile promotions <svcmobile_promotions@nvidia.com>  2017-05-24 15:14:13 -0400
commit     b70bad4b9f40e94f731fd9d509e1f3f6617f0b05
tree       21bfaf082aeb7662eb194f72c5f33a36c7cb7bdc /drivers/gpu/nvgpu
parent     92fe030e5250409ecd500dcf719547f3fb0f1873
gpu: nvgpu: Refactor gk20a_vm_alloc_va()
This function is an internal function to the VM manager that allocates
virtual memory space in the GVA allocator. It is unfortunately used in
the vGPU code, though. In any event, this patch cleans up and moves the
implementation of these functions into the VM common code.

JIRA NVGPU-12
JIRA NVGPU-30

Change-Id: I24a3d29b5fcb12615df27d2ac82891d1bacfe541
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/1477745
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
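For reference, callers pair the new common helpers roughly as follows; this is
a minimal sketch based only on the signatures added by this patch, with
example_map() and program_gmmu_ptes() as hypothetical stand-ins for the real
mapping logic, not functions from the driver.

/*
 * Illustrative sketch only (not part of this patch): the alloc/free
 * pairing that callers such as gk20a_locked_gmmu_map() follow after
 * the refactor.
 */
static int program_gmmu_ptes(struct vm_gk20a *vm, u64 va, u64 size);

static u64 example_map(struct vm_gk20a *vm, u64 size,
		       enum gmmu_pgsz_gk20a pgsz_idx)
{
	/* Carve a VA range out of the per-page-size GVA allocator. */
	u64 va = __nvgpu_vm_alloc_va(vm, size, pgsz_idx);

	if (!va)
		return 0;

	/* On any later failure the range goes back to the allocator. */
	if (program_gmmu_ptes(vm, va, size)) {
		__nvgpu_vm_free_va(vm, va, pgsz_idx);
		return 0;
	}

	return va;
}

Note that __nvgpu_vm_free_va() takes no size argument: the size parameter of
the old gk20a_vm_free_va() was only used for its debug print, since
nvgpu_free() needs just the address.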
Diffstat (limited to 'drivers/gpu/nvgpu')
-rw-r--r--  drivers/gpu/nvgpu/common/linux/vm.c             2
-rw-r--r--  drivers/gpu/nvgpu/common/mm/vm.c               48
-rw-r--r--  drivers/gpu/nvgpu/gk20a/mm_gk20a.c             57
-rw-r--r--  drivers/gpu/nvgpu/gk20a/mm_gk20a.h              8
-rw-r--r--  drivers/gpu/nvgpu/include/nvgpu/vm.h           13
-rw-r--r--  drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c    3
-rw-r--r--  drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c    2
-rw-r--r--  drivers/gpu/nvgpu/vgpu/gr_vgpu.c               42
-rw-r--r--  drivers/gpu/nvgpu/vgpu/mm_vgpu.c                4
9 files changed, 90 insertions(+), 89 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/linux/vm.c b/drivers/gpu/nvgpu/common/linux/vm.c
index 5470d9ee..9238a9df 100644
--- a/drivers/gpu/nvgpu/common/linux/vm.c
+++ b/drivers/gpu/nvgpu/common/linux/vm.c
@@ -394,7 +394,7 @@ clean_up:
 	}
 	nvgpu_kfree(g, mapped_buffer);
 	if (va_allocated)
-		gk20a_vm_free_va(vm, map_offset, bfr.size, bfr.pgsz_idx);
+		__nvgpu_vm_free_va(vm, map_offset, bfr.pgsz_idx);
 	if (!IS_ERR(bfr.sgt))
 		gk20a_mm_unpin(g->dev, dmabuf, bfr.sgt);
 
diff --git a/drivers/gpu/nvgpu/common/mm/vm.c b/drivers/gpu/nvgpu/common/mm/vm.c
index 3bdc905e..3b3b7a10 100644
--- a/drivers/gpu/nvgpu/common/mm/vm.c
+++ b/drivers/gpu/nvgpu/common/mm/vm.c
@@ -29,6 +29,54 @@ int vm_aspace_id(struct vm_gk20a *vm)
 	return vm->as_share ? vm->as_share->id : -1;
 }
 
+u64 __nvgpu_vm_alloc_va(struct vm_gk20a *vm, u64 size,
+			enum gmmu_pgsz_gk20a pgsz_idx)
+
+{
+	struct gk20a *g = vm->mm->g;
+	struct nvgpu_allocator *vma = NULL;
+	u64 addr;
+	u64 page_size = vm->gmmu_page_sizes[pgsz_idx];
+
+	vma = vm->vma[pgsz_idx];
+
+	if (pgsz_idx >= gmmu_nr_page_sizes) {
+		nvgpu_err(g, "(%s) invalid page size requested", vma->name);
+		return 0;
+	}
+
+	if ((pgsz_idx == gmmu_page_size_big) && !vm->big_pages) {
+		nvgpu_err(g, "(%s) unsupportd page size requested", vma->name);
+		return 0;
+	}
+
+	/* Be certain we round up to page_size if needed */
+	size = (size + ((u64)page_size - 1)) & ~((u64)page_size - 1);
+	nvgpu_log(g, gpu_dbg_map, "size=0x%llx @ pgsz=%dKB", size,
+		  vm->gmmu_page_sizes[pgsz_idx] >> 10);
+
+	addr = nvgpu_alloc(vma, size);
+	if (!addr) {
+		nvgpu_err(g, "(%s) oom: sz=0x%llx", vma->name, size);
+		return 0;
+	}
+
+	nvgpu_log(g, gpu_dbg_map, "(%s) addr: 0x%llx", vma->name, addr);
+	return addr;
+}
+
+int __nvgpu_vm_free_va(struct vm_gk20a *vm, u64 addr,
+		       enum gmmu_pgsz_gk20a pgsz_idx)
+{
+	struct gk20a *g = vm->mm->g;
+	struct nvgpu_allocator *vma = vm->vma[pgsz_idx];
+
+	nvgpu_log(g, gpu_dbg_map, "(%s) addr: 0x%llx", vma->name, addr);
+	nvgpu_free(vma, addr);
+
+	return 0;
+}
+
 void nvgpu_vm_mapping_batch_start(struct vm_gk20a_mapping_batch *mapping_batch)
 {
 	memset(mapping_batch, 0, sizeof(*mapping_batch));
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index 5051f028..2642a0b1 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -1192,57 +1192,6 @@ static void nvgpu_vm_unmap_user(struct vm_gk20a *vm, u64 offset,
 	nvgpu_mutex_release(&vm->update_gmmu_lock);
 }
 
-u64 gk20a_vm_alloc_va(struct vm_gk20a *vm,
-		      u64 size,
-		      enum gmmu_pgsz_gk20a gmmu_pgsz_idx)
-
-{
-	struct nvgpu_allocator *vma = vm->vma[gmmu_pgsz_idx];
-	u64 offset;
-	u64 gmmu_page_size = vm->gmmu_page_sizes[gmmu_pgsz_idx];
-	struct gk20a *g = vm->mm->g;
-
-	if (gmmu_pgsz_idx >= gmmu_nr_page_sizes) {
-		nvgpu_warn(g,
-			   "invalid page size requested in gk20a vm alloc");
-		return 0;
-	}
-
-	if ((gmmu_pgsz_idx == gmmu_page_size_big) && !vm->big_pages) {
-		nvgpu_warn(g, "unsupportd page size requested");
-		return 0;
-
-	}
-
-	/* Be certain we round up to gmmu_page_size if needed */
-	size = (size + ((u64)gmmu_page_size - 1)) & ~((u64)gmmu_page_size - 1);
-	gk20a_dbg_info("size=0x%llx @ pgsz=%dKB", size,
-		       vm->gmmu_page_sizes[gmmu_pgsz_idx]>>10);
-
-	offset = nvgpu_alloc(vma, size);
-	if (!offset) {
-		nvgpu_err(vm->mm->g,
-			  "%s oom: sz=0x%llx", vma->name, size);
-		return 0;
-	}
-
-	gk20a_dbg_fn("%s found addr: 0x%llx", vma->name, offset);
-	return offset;
-}
-
-int gk20a_vm_free_va(struct vm_gk20a *vm,
-		     u64 offset, u64 size,
-		     enum gmmu_pgsz_gk20a pgsz_idx)
-{
-	struct nvgpu_allocator *vma = vm->vma[pgsz_idx];
-
-	gk20a_dbg_info("%s free addr=0x%llx, size=0x%llx",
-		       vma->name, offset, size);
-	nvgpu_free(vma, offset);
-
-	return 0;
-}
-
 int setup_buffer_kind_and_compression(struct vm_gk20a *vm,
 				      u32 flags,
 				      struct buffer_attrs *bfr,
@@ -1313,7 +1262,7 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
 
 	/* Allocate (or validate when map_offset != 0) the virtual address. */
 	if (!map_offset) {
-		map_offset = gk20a_vm_alloc_va(vm, size,
+		map_offset = __nvgpu_vm_alloc_va(vm, size,
 					       pgsz_idx);
 		if (!map_offset) {
 			nvgpu_err(g, "failed to allocate va space");
@@ -1364,7 +1313,7 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
 	return map_offset;
 fail_validate:
 	if (allocated)
-		gk20a_vm_free_va(vm, map_offset, size, pgsz_idx);
+		__nvgpu_vm_free_va(vm, map_offset, pgsz_idx);
 fail_alloc:
 	nvgpu_err(g, "%s: failed with err=%d\n", __func__, err);
 	return 0;
@@ -1383,7 +1332,7 @@ void gk20a_locked_gmmu_unmap(struct vm_gk20a *vm,
 	struct gk20a *g = gk20a_from_vm(vm);
 
 	if (va_allocated) {
-		err = gk20a_vm_free_va(vm, vaddr, size, pgsz_idx);
+		err = __nvgpu_vm_free_va(vm, vaddr, pgsz_idx);
 		if (err) {
 			nvgpu_err(g, "failed to free va");
 			return;
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
index 6ddf842a..27681199 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
@@ -412,14 +412,6 @@ int nvgpu_vm_get_compbits_info(struct vm_gk20a *vm,
 			       u32 *mapping_ctagline,
 			       u32 *flags);
 
-u64 gk20a_vm_alloc_va(struct vm_gk20a *vm,
-		      u64 size,
-		      enum gmmu_pgsz_gk20a gmmu_pgsz_idx);
-
-int gk20a_vm_free_va(struct vm_gk20a *vm,
-		     u64 offset, u64 size,
-		     enum gmmu_pgsz_gk20a pgsz_idx);
-
 /* vm-as interface */
 struct nvgpu_as_alloc_space_args;
 struct nvgpu_as_free_space_args;
diff --git a/drivers/gpu/nvgpu/include/nvgpu/vm.h b/drivers/gpu/nvgpu/include/nvgpu/vm.h
index 69c08c77..fb55483d 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/vm.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/vm.h
@@ -242,4 +242,17 @@ int nvgpu_init_vm(struct mm_gk20a *mm,
 		  char *name);
 void nvgpu_deinit_vm(struct vm_gk20a *vm);
 
+/*
+ * These are private to the VM code but are unfortunately used by the vgpu code.
+ * It appears to be used for an optimization in reducing the number of server
+ * requests to the vgpu server. Basically the vgpu implementation of
+ * map_global_ctx_buffers() sends a bunch of VA ranges over to the RM server.
+ * Ideally the RM server can just batch mappings but until such a time this
+ * will be used by the vgpu code.
+ */
+u64 __nvgpu_vm_alloc_va(struct vm_gk20a *vm, u64 size,
+			enum gmmu_pgsz_gk20a pgsz_idx);
+int __nvgpu_vm_free_va(struct vm_gk20a *vm, u64 addr,
+		       enum gmmu_pgsz_gk20a pgsz_idx);
+
 #endif
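The vgpu flow described in that comment amounts to the pattern below; a hedged
sketch only, where example_vgpu_map_ctx_buffer() and vgpu_send_map_request()
are hypothetical stand-ins for the gr_vgpu.c code in the following hunks, which
allocates a VA on the client and hands it to the RM server via
vgpu_comm_sendrecv().

/* Illustrative only -- not part of this patch. */
static int vgpu_send_map_request(struct vm_gk20a *vm, u64 gpu_va, u64 size);

static u64 example_vgpu_map_ctx_buffer(struct vm_gk20a *vm, u64 size)
{
	/* The client picks the VA; the RM server does the actual mapping. */
	u64 gpu_va = __nvgpu_vm_alloc_va(vm, size, gmmu_page_size_kernel);

	if (!gpu_va)
		return 0;

	if (vgpu_send_map_request(vm, gpu_va, size)) {
		__nvgpu_vm_free_va(vm, gpu_va, gmmu_page_size_kernel);
		return 0;
	}

	return gpu_va;
}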
diff --git a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c
index b5c9735c..cac1db29 100644
--- a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c
+++ b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c
@@ -40,8 +40,7 @@ static void vgpu_gr_gp10b_free_gr_ctx(struct gk20a *g, struct vm_gk20a *vm,
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	WARN_ON(err || msg.ret);
 
-	gk20a_vm_free_va(vm, gr_ctx->mem.gpu_va, gr_ctx->mem.size,
-			 gmmu_page_size_kernel);
+	__nvgpu_vm_free_va(vm, gr_ctx->mem.gpu_va, gmmu_page_size_kernel);
 
 	nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.pagepool_ctxsw_buffer);
 	nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.betacb_ctxsw_buffer);
diff --git a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
index 15ff10b9..f425b7e5 100644
--- a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
+++ b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
@@ -81,7 +81,7 @@ static u64 vgpu_gp10b_locked_gmmu_map(struct vm_gk20a *vm,
 
 	/* Allocate (or validate when map_offset != 0) the virtual address. */
 	if (!map_offset) {
-		map_offset = gk20a_vm_alloc_va(vm, size, pgsz_idx);
+		map_offset = __nvgpu_vm_alloc_va(vm, size, pgsz_idx);
 		if (!map_offset) {
 			nvgpu_err(g, "failed to allocate va space");
 			err = -ENOMEM;
diff --git a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
index 42af9ee1..2198b115 100644
--- a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
@@ -156,7 +156,7 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
 	/* FIXME: add VPR support */
 
 	/* Circular Buffer */
-	gpu_va = gk20a_vm_alloc_va(ch_vm,
+	gpu_va = __nvgpu_vm_alloc_va(ch_vm,
 			gr->global_ctx_buffer[CIRCULAR].mem.size,
 			gmmu_page_size_kernel);
 
@@ -166,7 +166,7 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
 	g_bfr_size[CIRCULAR_VA] = gr->global_ctx_buffer[CIRCULAR].mem.size;
 
 	/* Attribute Buffer */
-	gpu_va = gk20a_vm_alloc_va(ch_vm,
+	gpu_va = __nvgpu_vm_alloc_va(ch_vm,
 			gr->global_ctx_buffer[ATTRIBUTE].mem.size,
 			gmmu_page_size_kernel);
 
@@ -176,7 +176,7 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
 	g_bfr_size[ATTRIBUTE_VA] = gr->global_ctx_buffer[ATTRIBUTE].mem.size;
 
 	/* Page Pool */
-	gpu_va = gk20a_vm_alloc_va(ch_vm,
+	gpu_va = __nvgpu_vm_alloc_va(ch_vm,
 			gr->global_ctx_buffer[PAGEPOOL].mem.size,
 			gmmu_page_size_kernel);
 	if (!gpu_va)
@@ -185,7 +185,7 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
 	g_bfr_size[PAGEPOOL_VA] = gr->global_ctx_buffer[PAGEPOOL].mem.size;
 
 	/* Priv register Access Map */
-	gpu_va = gk20a_vm_alloc_va(ch_vm,
+	gpu_va = __nvgpu_vm_alloc_va(ch_vm,
 			gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem.size,
 			gmmu_page_size_kernel);
 	if (!gpu_va)
@@ -211,8 +211,8 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
 clean_up:
 	for (i = 0; i < NR_GLOBAL_CTX_BUF_VA; i++) {
 		if (g_bfr_va[i]) {
-			gk20a_vm_free_va(ch_vm, g_bfr_va[i],
-					 g_bfr_size[i], gmmu_page_size_kernel);
+			__nvgpu_vm_free_va(ch_vm, g_bfr_va[i],
+					   gmmu_page_size_kernel);
 			g_bfr_va[i] = 0;
 		}
 	}
@@ -242,8 +242,8 @@ static void vgpu_gr_unmap_global_ctx_buffers(struct channel_gk20a *c)
 
 	for (i = 0; i < NR_GLOBAL_CTX_BUF_VA; i++) {
 		if (g_bfr_va[i]) {
-			gk20a_vm_free_va(ch_vm, g_bfr_va[i], g_bfr_size[i],
-					 gmmu_page_size_kernel);
+			__nvgpu_vm_free_va(ch_vm, g_bfr_va[i],
+					   gmmu_page_size_kernel);
 			g_bfr_va[i] = 0;
 			g_bfr_size[i] = 0;
 		}
@@ -277,7 +277,7 @@ int vgpu_gr_alloc_gr_ctx(struct gk20a *g,
 		return -ENOMEM;
 
 	gr_ctx->mem.size = gr->ctx_vars.buffer_total_size;
-	gr_ctx->mem.gpu_va = gk20a_vm_alloc_va(vm,
+	gr_ctx->mem.gpu_va = __nvgpu_vm_alloc_va(vm,
 						gr_ctx->mem.size,
 						gmmu_page_size_kernel);
 
@@ -296,8 +296,8 @@ int vgpu_gr_alloc_gr_ctx(struct gk20a *g,
 
 	if (unlikely(err)) {
 		nvgpu_err(g, "fail to alloc gr_ctx");
-		gk20a_vm_free_va(vm, gr_ctx->mem.gpu_va,
-				 gr_ctx->mem.size, gmmu_page_size_kernel);
+		__nvgpu_vm_free_va(vm, gr_ctx->mem.gpu_va,
+				   gmmu_page_size_kernel);
 		nvgpu_kfree(g, gr_ctx);
 	} else {
 		gr_ctx->virt_ctx = p->gr_ctx_handle;
@@ -323,8 +323,8 @@ void vgpu_gr_free_gr_ctx(struct gk20a *g, struct vm_gk20a *vm,
 		err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 		WARN_ON(err || msg.ret);
 
-		gk20a_vm_free_va(vm, gr_ctx->mem.gpu_va, gr_ctx->mem.size,
-				 gmmu_page_size_kernel);
+		__nvgpu_vm_free_va(vm, gr_ctx->mem.gpu_va,
+				   gmmu_page_size_kernel);
 		nvgpu_kfree(g, gr_ctx);
 	}
 }
@@ -349,7 +349,7 @@ static int vgpu_gr_alloc_channel_patch_ctx(struct gk20a *g,
 	gk20a_dbg_fn("");
 
 	patch_ctx->mem.size = 128 * sizeof(u32);
-	patch_ctx->mem.gpu_va = gk20a_vm_alloc_va(ch_vm,
+	patch_ctx->mem.gpu_va = __nvgpu_vm_alloc_va(ch_vm,
 						  patch_ctx->mem.size,
 						  gmmu_page_size_kernel);
 	if (!patch_ctx->mem.gpu_va)
@@ -361,8 +361,8 @@ static int vgpu_gr_alloc_channel_patch_ctx(struct gk20a *g,
 	p->patch_ctx_va = patch_ctx->mem.gpu_va;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	if (err || msg.ret) {
-		gk20a_vm_free_va(ch_vm, patch_ctx->mem.gpu_va,
-				 patch_ctx->mem.size, gmmu_page_size_kernel);
+		__nvgpu_vm_free_va(ch_vm, patch_ctx->mem.gpu_va,
+				   gmmu_page_size_kernel);
 		err = -ENOMEM;
 	}
 
@@ -387,8 +387,8 @@ static void vgpu_gr_free_channel_patch_ctx(struct channel_gk20a *c)
 		err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 		WARN_ON(err || msg.ret);
 
-		gk20a_vm_free_va(ch_vm, patch_ctx->mem.gpu_va,
-				 patch_ctx->mem.size, gmmu_page_size_kernel);
+		__nvgpu_vm_free_va(ch_vm, patch_ctx->mem.gpu_va,
+				   gmmu_page_size_kernel);
 		patch_ctx->mem.gpu_va = 0;
 	}
 }
@@ -413,8 +413,8 @@ static void vgpu_gr_free_channel_pm_ctx(struct channel_gk20a *c)
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	WARN_ON(err || msg.ret);
 
-	gk20a_vm_free_va(c->vm, pm_ctx->mem.gpu_va, pm_ctx->mem.size,
-			 gmmu_page_size_kernel);
+	__nvgpu_vm_free_va(c->vm, pm_ctx->mem.gpu_va,
+			   gmmu_page_size_kernel);
 	pm_ctx->mem.gpu_va = 0;
 }
 
@@ -1046,7 +1046,7 @@ static int vgpu_gr_update_hwpm_ctxsw_mode(struct gk20a *g,
 
 	/* Allocate buffer if necessary */
 	if (pm_ctx->mem.gpu_va == 0) {
-		pm_ctx->mem.gpu_va = gk20a_vm_alloc_va(ch->vm,
+		pm_ctx->mem.gpu_va = __nvgpu_vm_alloc_va(ch->vm,
 				g->gr.ctx_vars.pm_ctxsw_image_size,
 				gmmu_page_size_kernel);
 
diff --git a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
index b42fbcb3..b8b5985c 100644
--- a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
@@ -106,7 +106,7 @@ static u64 vgpu_locked_gmmu_map(struct vm_gk20a *vm,
 
 	/* Allocate (or validate when map_offset != 0) the virtual address. */
 	if (!map_offset) {
-		map_offset = gk20a_vm_alloc_va(vm, size,
+		map_offset = __nvgpu_vm_alloc_va(vm, size,
 					       pgsz_idx);
 		if (!map_offset) {
 			nvgpu_err(g, "failed to allocate va space\n");
@@ -180,7 +180,7 @@ static void vgpu_locked_gmmu_unmap(struct vm_gk20a *vm,
 	gk20a_dbg_fn("");
 
 	if (va_allocated) {
-		err = gk20a_vm_free_va(vm, vaddr, size, pgsz_idx);
+		err = __nvgpu_vm_free_va(vm, vaddr, pgsz_idx);
 		if (err) {
 			dev_err(dev_from_vm(vm),
 				"failed to free va");