path: root/drivers/gpu/nvgpu/vgpu
author		Alex Waterman <alexw@nvidia.com>	2017-04-26 17:27:02 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-05-24 15:14:13 -0400
commit		b70bad4b9f40e94f731fd9d509e1f3f6617f0b05 (patch)
tree		21bfaf082aeb7662eb194f72c5f33a36c7cb7bdc /drivers/gpu/nvgpu/vgpu
parent		92fe030e5250409ecd500dcf719547f3fb0f1873 (diff)
gpu: nvgpu: Refactor gk20a_vm_alloc_va()
This function is an internal function to the VM manager that allocates
virtual memory space in the GVA allocator. It is unfortunately used in
the vGPU code, though. In any event, this patch cleans up and moves the
implementation of these functions into the VM common code.

JIRA NVGPU-12
JIRA NVGPU-30

Change-Id: I24a3d29b5fcb12615df27d2ac82891d1bacfe541
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/1477745
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
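For context, the call sites in this diff imply the shape of the renamed helpers. The declarations below are a minimal sketch inferred from those call sites only; the parameter names, the enum values, and the example_map() helper are assumptions, not copied from the nvgpu headers. The visible interface change is that the free path no longer takes the allocation size.

#include <linux/types.h>

/*
 * Sketch of the renamed GVA allocator helpers as used by the vGPU code
 * in this patch. Inferred from the call sites; not the authoritative
 * prototypes from the VM common code.
 */
struct vm_gk20a;

enum gmmu_pgsz_gk20a {
	gmmu_page_size_small,
	gmmu_page_size_big,
	gmmu_page_size_kernel,
};

/*
 * Reserve 'size' bytes of GPU virtual address space in vm's GVA
 * allocator for the given page-size index; returns the base VA, or 0
 * on failure (the callers below turn that into -ENOMEM).
 */
u64 __nvgpu_vm_alloc_va(struct vm_gk20a *vm, u64 size,
			enum gmmu_pgsz_gk20a pgsz_idx);

/*
 * Release a VA previously returned by __nvgpu_vm_alloc_va(). Unlike
 * gk20a_vm_free_va(), no size argument is passed; presumably the
 * allocator tracks it. Returns 0 on success.
 */
int __nvgpu_vm_free_va(struct vm_gk20a *vm, u64 addr,
		       enum gmmu_pgsz_gk20a pgsz_idx);

/* Hypothetical usage mirroring the vGPU map paths in this diff. */
static inline u64 example_map(struct vm_gk20a *vm, u64 size)
{
	u64 va = __nvgpu_vm_alloc_va(vm, size, gmmu_page_size_kernel);

	if (!va)
		return 0;	/* caller reports -ENOMEM */
	/* ... send the mapping RPC to the vGPU server here ... */
	return va;
}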
Diffstat (limited to 'drivers/gpu/nvgpu/vgpu')
-rw-r--r--	drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c	3
-rw-r--r--	drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c	2
-rw-r--r--	drivers/gpu/nvgpu/vgpu/gr_vgpu.c	42
-rw-r--r--	drivers/gpu/nvgpu/vgpu/mm_vgpu.c	4
4 files changed, 25 insertions(+), 26 deletions(-)
diff --git a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c
index b5c9735c..cac1db29 100644
--- a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c
+++ b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c
@@ -40,8 +40,7 @@ static void vgpu_gr_gp10b_free_gr_ctx(struct gk20a *g, struct vm_gk20a *vm,
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	WARN_ON(err || msg.ret);
 
-	gk20a_vm_free_va(vm, gr_ctx->mem.gpu_va, gr_ctx->mem.size,
-			 gmmu_page_size_kernel);
+	__nvgpu_vm_free_va(vm, gr_ctx->mem.gpu_va, gmmu_page_size_kernel);
 
 	nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.pagepool_ctxsw_buffer);
 	nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.betacb_ctxsw_buffer);
diff --git a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
index 15ff10b9..f425b7e5 100644
--- a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
+++ b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
@@ -81,7 +81,7 @@ static u64 vgpu_gp10b_locked_gmmu_map(struct vm_gk20a *vm,
 
 	/* Allocate (or validate when map_offset != 0) the virtual address. */
 	if (!map_offset) {
-		map_offset = gk20a_vm_alloc_va(vm, size, pgsz_idx);
+		map_offset = __nvgpu_vm_alloc_va(vm, size, pgsz_idx);
 		if (!map_offset) {
 			nvgpu_err(g, "failed to allocate va space");
 			err = -ENOMEM;
diff --git a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
index 42af9ee1..2198b115 100644
--- a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
@@ -156,7 +156,7 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
 	/* FIXME: add VPR support */
 
 	/* Circular Buffer */
-	gpu_va = gk20a_vm_alloc_va(ch_vm,
+	gpu_va = __nvgpu_vm_alloc_va(ch_vm,
 			gr->global_ctx_buffer[CIRCULAR].mem.size,
 			gmmu_page_size_kernel);
 
@@ -166,7 +166,7 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
 	g_bfr_size[CIRCULAR_VA] = gr->global_ctx_buffer[CIRCULAR].mem.size;
 
 	/* Attribute Buffer */
-	gpu_va = gk20a_vm_alloc_va(ch_vm,
+	gpu_va = __nvgpu_vm_alloc_va(ch_vm,
 			gr->global_ctx_buffer[ATTRIBUTE].mem.size,
 			gmmu_page_size_kernel);
 
@@ -176,7 +176,7 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
 	g_bfr_size[ATTRIBUTE_VA] = gr->global_ctx_buffer[ATTRIBUTE].mem.size;
 
 	/* Page Pool */
-	gpu_va = gk20a_vm_alloc_va(ch_vm,
+	gpu_va = __nvgpu_vm_alloc_va(ch_vm,
 			gr->global_ctx_buffer[PAGEPOOL].mem.size,
 			gmmu_page_size_kernel);
 	if (!gpu_va)
@@ -185,7 +185,7 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
 	g_bfr_size[PAGEPOOL_VA] = gr->global_ctx_buffer[PAGEPOOL].mem.size;
 
 	/* Priv register Access Map */
-	gpu_va = gk20a_vm_alloc_va(ch_vm,
+	gpu_va = __nvgpu_vm_alloc_va(ch_vm,
 			gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem.size,
 			gmmu_page_size_kernel);
 	if (!gpu_va)
@@ -211,8 +211,8 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
 clean_up:
 	for (i = 0; i < NR_GLOBAL_CTX_BUF_VA; i++) {
 		if (g_bfr_va[i]) {
-			gk20a_vm_free_va(ch_vm, g_bfr_va[i],
-					 g_bfr_size[i], gmmu_page_size_kernel);
+			__nvgpu_vm_free_va(ch_vm, g_bfr_va[i],
+					   gmmu_page_size_kernel);
 			g_bfr_va[i] = 0;
 		}
 	}
@@ -242,8 +242,8 @@ static void vgpu_gr_unmap_global_ctx_buffers(struct channel_gk20a *c)
 
 	for (i = 0; i < NR_GLOBAL_CTX_BUF_VA; i++) {
 		if (g_bfr_va[i]) {
-			gk20a_vm_free_va(ch_vm, g_bfr_va[i], g_bfr_size[i],
-					 gmmu_page_size_kernel);
+			__nvgpu_vm_free_va(ch_vm, g_bfr_va[i],
+					   gmmu_page_size_kernel);
 			g_bfr_va[i] = 0;
 			g_bfr_size[i] = 0;
 		}
@@ -277,7 +277,7 @@ int vgpu_gr_alloc_gr_ctx(struct gk20a *g,
 		return -ENOMEM;
 
 	gr_ctx->mem.size = gr->ctx_vars.buffer_total_size;
-	gr_ctx->mem.gpu_va = gk20a_vm_alloc_va(vm,
+	gr_ctx->mem.gpu_va = __nvgpu_vm_alloc_va(vm,
 						gr_ctx->mem.size,
 						gmmu_page_size_kernel);
 
@@ -296,8 +296,8 @@ int vgpu_gr_alloc_gr_ctx(struct gk20a *g,
 
 	if (unlikely(err)) {
 		nvgpu_err(g, "fail to alloc gr_ctx");
-		gk20a_vm_free_va(vm, gr_ctx->mem.gpu_va,
-				 gr_ctx->mem.size, gmmu_page_size_kernel);
+		__nvgpu_vm_free_va(vm, gr_ctx->mem.gpu_va,
+				   gmmu_page_size_kernel);
 		nvgpu_kfree(g, gr_ctx);
 	} else {
 		gr_ctx->virt_ctx = p->gr_ctx_handle;
@@ -323,8 +323,8 @@ void vgpu_gr_free_gr_ctx(struct gk20a *g, struct vm_gk20a *vm,
 		err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 		WARN_ON(err || msg.ret);
 
-		gk20a_vm_free_va(vm, gr_ctx->mem.gpu_va, gr_ctx->mem.size,
-				 gmmu_page_size_kernel);
+		__nvgpu_vm_free_va(vm, gr_ctx->mem.gpu_va,
+				   gmmu_page_size_kernel);
 		nvgpu_kfree(g, gr_ctx);
 	}
 }
@@ -349,7 +349,7 @@ static int vgpu_gr_alloc_channel_patch_ctx(struct gk20a *g,
 	gk20a_dbg_fn("");
 
 	patch_ctx->mem.size = 128 * sizeof(u32);
-	patch_ctx->mem.gpu_va = gk20a_vm_alloc_va(ch_vm,
+	patch_ctx->mem.gpu_va = __nvgpu_vm_alloc_va(ch_vm,
 						  patch_ctx->mem.size,
 						  gmmu_page_size_kernel);
 	if (!patch_ctx->mem.gpu_va)
@@ -361,8 +361,8 @@ static int vgpu_gr_alloc_channel_patch_ctx(struct gk20a *g,
 	p->patch_ctx_va = patch_ctx->mem.gpu_va;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	if (err || msg.ret) {
-		gk20a_vm_free_va(ch_vm, patch_ctx->mem.gpu_va,
-				 patch_ctx->mem.size, gmmu_page_size_kernel);
+		__nvgpu_vm_free_va(ch_vm, patch_ctx->mem.gpu_va,
+				   gmmu_page_size_kernel);
 		err = -ENOMEM;
 	}
 
@@ -387,8 +387,8 @@ static void vgpu_gr_free_channel_patch_ctx(struct channel_gk20a *c)
 		err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 		WARN_ON(err || msg.ret);
 
-		gk20a_vm_free_va(ch_vm, patch_ctx->mem.gpu_va,
-				 patch_ctx->mem.size, gmmu_page_size_kernel);
+		__nvgpu_vm_free_va(ch_vm, patch_ctx->mem.gpu_va,
+				   gmmu_page_size_kernel);
 		patch_ctx->mem.gpu_va = 0;
 	}
 }
@@ -413,8 +413,8 @@ static void vgpu_gr_free_channel_pm_ctx(struct channel_gk20a *c)
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	WARN_ON(err || msg.ret);
 
-	gk20a_vm_free_va(c->vm, pm_ctx->mem.gpu_va, pm_ctx->mem.size,
-			 gmmu_page_size_kernel);
+	__nvgpu_vm_free_va(c->vm, pm_ctx->mem.gpu_va,
+			   gmmu_page_size_kernel);
 	pm_ctx->mem.gpu_va = 0;
 }
 
@@ -1046,7 +1046,7 @@ static int vgpu_gr_update_hwpm_ctxsw_mode(struct gk20a *g,
 
 	/* Allocate buffer if necessary */
 	if (pm_ctx->mem.gpu_va == 0) {
-		pm_ctx->mem.gpu_va = gk20a_vm_alloc_va(ch->vm,
+		pm_ctx->mem.gpu_va = __nvgpu_vm_alloc_va(ch->vm,
 				g->gr.ctx_vars.pm_ctxsw_image_size,
 				gmmu_page_size_kernel);
 
diff --git a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
index b42fbcb3..b8b5985c 100644
--- a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
@@ -106,7 +106,7 @@ static u64 vgpu_locked_gmmu_map(struct vm_gk20a *vm,
 
 	/* Allocate (or validate when map_offset != 0) the virtual address. */
 	if (!map_offset) {
-		map_offset = gk20a_vm_alloc_va(vm, size,
+		map_offset = __nvgpu_vm_alloc_va(vm, size,
 				pgsz_idx);
 		if (!map_offset) {
 			nvgpu_err(g, "failed to allocate va space\n");
@@ -180,7 +180,7 @@ static void vgpu_locked_gmmu_unmap(struct vm_gk20a *vm,
 	gk20a_dbg_fn("");
 
 	if (va_allocated) {
-		err = gk20a_vm_free_va(vm, vaddr, size, pgsz_idx);
+		err = __nvgpu_vm_free_va(vm, vaddr, pgsz_idx);
 		if (err) {
 			dev_err(dev_from_vm(vm),
 				"failed to free va");