Diffstat (limited to 'drivers/gpu/nvgpu/vgpu/gr_vgpu.c')
-rw-r--r--	drivers/gpu/nvgpu/vgpu/gr_vgpu.c	| 42
1 file changed, 21 insertions(+), 21 deletions(-)
diff --git a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
index 42af9ee1..2198b115 100644
--- a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
@@ -156,7 +156,7 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
 	/* FIXME: add VPR support */

 	/* Circular Buffer */
-	gpu_va = gk20a_vm_alloc_va(ch_vm,
+	gpu_va = __nvgpu_vm_alloc_va(ch_vm,
 			gr->global_ctx_buffer[CIRCULAR].mem.size,
 			gmmu_page_size_kernel);

@@ -166,7 +166,7 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
 	g_bfr_size[CIRCULAR_VA] = gr->global_ctx_buffer[CIRCULAR].mem.size;

 	/* Attribute Buffer */
-	gpu_va = gk20a_vm_alloc_va(ch_vm,
+	gpu_va = __nvgpu_vm_alloc_va(ch_vm,
 			gr->global_ctx_buffer[ATTRIBUTE].mem.size,
 			gmmu_page_size_kernel);

@@ -176,7 +176,7 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
 	g_bfr_size[ATTRIBUTE_VA] = gr->global_ctx_buffer[ATTRIBUTE].mem.size;

 	/* Page Pool */
-	gpu_va = gk20a_vm_alloc_va(ch_vm,
+	gpu_va = __nvgpu_vm_alloc_va(ch_vm,
 			gr->global_ctx_buffer[PAGEPOOL].mem.size,
 			gmmu_page_size_kernel);
 	if (!gpu_va)
@@ -185,7 +185,7 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
 	g_bfr_size[PAGEPOOL_VA] = gr->global_ctx_buffer[PAGEPOOL].mem.size;

 	/* Priv register Access Map */
-	gpu_va = gk20a_vm_alloc_va(ch_vm,
+	gpu_va = __nvgpu_vm_alloc_va(ch_vm,
 			gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem.size,
 			gmmu_page_size_kernel);
 	if (!gpu_va)
@@ -211,8 +211,8 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
 clean_up:
 	for (i = 0; i < NR_GLOBAL_CTX_BUF_VA; i++) {
 		if (g_bfr_va[i]) {
-			gk20a_vm_free_va(ch_vm, g_bfr_va[i],
-					 g_bfr_size[i], gmmu_page_size_kernel);
+			__nvgpu_vm_free_va(ch_vm, g_bfr_va[i],
+					   gmmu_page_size_kernel);
 			g_bfr_va[i] = 0;
 		}
 	}
@@ -242,8 +242,8 @@ static void vgpu_gr_unmap_global_ctx_buffers(struct channel_gk20a *c)
 
 	for (i = 0; i < NR_GLOBAL_CTX_BUF_VA; i++) {
 		if (g_bfr_va[i]) {
-			gk20a_vm_free_va(ch_vm, g_bfr_va[i], g_bfr_size[i],
-					 gmmu_page_size_kernel);
+			__nvgpu_vm_free_va(ch_vm, g_bfr_va[i],
+					   gmmu_page_size_kernel);
 			g_bfr_va[i] = 0;
 			g_bfr_size[i] = 0;
 		}
@@ -277,7 +277,7 @@ int vgpu_gr_alloc_gr_ctx(struct gk20a *g,
 		return -ENOMEM;

 	gr_ctx->mem.size = gr->ctx_vars.buffer_total_size;
-	gr_ctx->mem.gpu_va = gk20a_vm_alloc_va(vm,
+	gr_ctx->mem.gpu_va = __nvgpu_vm_alloc_va(vm,
 						gr_ctx->mem.size,
 						gmmu_page_size_kernel);

@@ -296,8 +296,8 @@ int vgpu_gr_alloc_gr_ctx(struct gk20a *g,
 
 	if (unlikely(err)) {
 		nvgpu_err(g, "fail to alloc gr_ctx");
-		gk20a_vm_free_va(vm, gr_ctx->mem.gpu_va,
-				 gr_ctx->mem.size, gmmu_page_size_kernel);
+		__nvgpu_vm_free_va(vm, gr_ctx->mem.gpu_va,
+				   gmmu_page_size_kernel);
 		nvgpu_kfree(g, gr_ctx);
 	} else {
 		gr_ctx->virt_ctx = p->gr_ctx_handle;
@@ -323,8 +323,8 @@ void vgpu_gr_free_gr_ctx(struct gk20a *g, struct vm_gk20a *vm,
 		err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 		WARN_ON(err || msg.ret);

-		gk20a_vm_free_va(vm, gr_ctx->mem.gpu_va, gr_ctx->mem.size,
-				 gmmu_page_size_kernel);
+		__nvgpu_vm_free_va(vm, gr_ctx->mem.gpu_va,
+				   gmmu_page_size_kernel);
 		nvgpu_kfree(g, gr_ctx);
 	}
 }
@@ -349,7 +349,7 @@ static int vgpu_gr_alloc_channel_patch_ctx(struct gk20a *g,
 	gk20a_dbg_fn("");

 	patch_ctx->mem.size = 128 * sizeof(u32);
-	patch_ctx->mem.gpu_va = gk20a_vm_alloc_va(ch_vm,
+	patch_ctx->mem.gpu_va = __nvgpu_vm_alloc_va(ch_vm,
 						  patch_ctx->mem.size,
 						  gmmu_page_size_kernel);
 	if (!patch_ctx->mem.gpu_va)
@@ -361,8 +361,8 @@ static int vgpu_gr_alloc_channel_patch_ctx(struct gk20a *g,
 	p->patch_ctx_va = patch_ctx->mem.gpu_va;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	if (err || msg.ret) {
-		gk20a_vm_free_va(ch_vm, patch_ctx->mem.gpu_va,
-				 patch_ctx->mem.size, gmmu_page_size_kernel);
+		__nvgpu_vm_free_va(ch_vm, patch_ctx->mem.gpu_va,
+				   gmmu_page_size_kernel);
 		err = -ENOMEM;
 	}

@@ -387,8 +387,8 @@ static void vgpu_gr_free_channel_patch_ctx(struct channel_gk20a *c)
 		err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 		WARN_ON(err || msg.ret);

-		gk20a_vm_free_va(ch_vm, patch_ctx->mem.gpu_va,
-				 patch_ctx->mem.size, gmmu_page_size_kernel);
+		__nvgpu_vm_free_va(ch_vm, patch_ctx->mem.gpu_va,
+				   gmmu_page_size_kernel);
 		patch_ctx->mem.gpu_va = 0;
 	}
 }
@@ -413,8 +413,8 @@ static void vgpu_gr_free_channel_pm_ctx(struct channel_gk20a *c)
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	WARN_ON(err || msg.ret);

-	gk20a_vm_free_va(c->vm, pm_ctx->mem.gpu_va, pm_ctx->mem.size,
-			 gmmu_page_size_kernel);
+	__nvgpu_vm_free_va(c->vm, pm_ctx->mem.gpu_va,
+			   gmmu_page_size_kernel);
 	pm_ctx->mem.gpu_va = 0;
 }

@@ -1046,7 +1046,7 @@ static int vgpu_gr_update_hwpm_ctxsw_mode(struct gk20a *g,
 
 	/* Allocate buffer if necessary */
 	if (pm_ctx->mem.gpu_va == 0) {
-		pm_ctx->mem.gpu_va = gk20a_vm_alloc_va(ch->vm,
+		pm_ctx->mem.gpu_va = __nvgpu_vm_alloc_va(ch->vm,
 				g->gr.ctx_vars.pm_ctxsw_image_size,
 				gmmu_page_size_kernel);

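
Reviewer note: every hunk above is the same mechanical substitution, so the new call pattern can be summarized in one place. The sketch below is illustrative only: map_one_buffer() and unmap_one_buffer() are hypothetical helper names, not functions in this file, and the sketch assumes the __nvgpu_vm_alloc_va()/__nvgpu_vm_free_va() prototypes and u64 return type implied by the hunks, plus the nvgpu VM headers that gr_vgpu.c already includes.

/*
 * Illustrative sketch of the call pattern after this change (hypothetical
 * helpers, assumed prototypes): the allocator still takes an explicit size,
 * while the free path now needs only the GPU VA and the page-size index.
 */
static int map_one_buffer(struct vm_gk20a *vm, u64 size, u64 *gpu_va_out)
{
	u64 gpu_va;

	/* Allocation is unchanged in shape: vm, size, page-size index. */
	gpu_va = __nvgpu_vm_alloc_va(vm, size, gmmu_page_size_kernel);
	if (!gpu_va)
		return -ENOMEM;

	*gpu_va_out = gpu_va;
	return 0;
}

static void unmap_one_buffer(struct vm_gk20a *vm, u64 gpu_va)
{
	/* The free path drops the size argument: vm, VA, page-size index. */
	__nvgpu_vm_free_va(vm, gpu_va, gmmu_page_size_kernel);
}

Call sites keep their surrounding bookkeeping (g_bfr_size[] is still tracked and zeroed in the cleanup loops); only the argument lists of the alloc/free calls change.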