summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
diff options
context:
space:
mode:
authorTerje Bergstrom <tbergstrom@nvidia.com>2015-03-21 11:10:59 -0400
committerAlexander Van Brunt <avanbrunt@nvidia.com>2015-05-05 16:57:34 -0400
commit2204f2a524af40d8e5fa0f3d764b61fefa397989 (patch)
tree25b2cb694526c4cb8bb484288db5953642cd842d /drivers/gpu/nvgpu/vgpu/gr_vgpu.c
parent5486503343ca9db24d7ce4acd4f7cf22e1f515f2 (diff)
gpu: nvgpu: Use common allocator for patch
Reduce amount of duplicate code around memory allocation by using common helpers, and common data structure for storing results of allocations. Bug 1605769 Change-Id: Idf51831e8be9cabe1ab9122b18317137fde6339f Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com> Reviewed-on: http://git-master/r/721030 Reviewed-on: http://git-master/r/737530 Reviewed-by: Alexander Van Brunt <avanbrunt@nvidia.com> Tested-by: Alexander Van Brunt <avanbrunt@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/vgpu/gr_vgpu.c')
-rw-r--r--drivers/gpu/nvgpu/vgpu/gr_vgpu.c21
1 file changed, 12 insertions, 9 deletions
diff --git a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
index fd8bb81b..8d0bb6cf 100644
--- a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
@@ -321,18 +321,20 @@ static int vgpu_gr_alloc_channel_patch_ctx(struct gk20a *g,
321 321
322 gk20a_dbg_fn(""); 322 gk20a_dbg_fn("");
323 323
324 patch_ctx->size = 128 * sizeof(u32); 324 patch_ctx->mem.size = 128 * sizeof(u32);
325 patch_ctx->gpu_va = gk20a_vm_alloc_va(ch_vm, patch_ctx->size, 0); 325 patch_ctx->mem.gpu_va = gk20a_vm_alloc_va(ch_vm,
326 if (!patch_ctx->gpu_va) 326 patch_ctx->mem.size, 0);
327 if (!patch_ctx->mem.gpu_va)
327 return -ENOMEM; 328 return -ENOMEM;
328 329
329 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_ALLOC_GR_PATCH_CTX; 330 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_ALLOC_GR_PATCH_CTX;
330 msg.handle = platform->virt_handle; 331 msg.handle = platform->virt_handle;
331 p->handle = c->virt_ctx; 332 p->handle = c->virt_ctx;
332 p->patch_ctx_va = patch_ctx->gpu_va; 333 p->patch_ctx_va = patch_ctx->mem.gpu_va;
333 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)); 334 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
334 if (err || msg.ret) { 335 if (err || msg.ret) {
335 gk20a_vm_free_va(ch_vm, patch_ctx->gpu_va, patch_ctx->size, 0); 336 gk20a_vm_free_va(ch_vm, patch_ctx->mem.gpu_va,
337 patch_ctx->mem.size, 0);
336 err = -ENOMEM; 338 err = -ENOMEM;
337 } 339 }
338 340
@@ -347,7 +349,7 @@ static void vgpu_gr_free_channel_patch_ctx(struct channel_gk20a *c)
347 349
348 gk20a_dbg_fn(""); 350 gk20a_dbg_fn("");
349 351
350 if (patch_ctx->gpu_va) { 352 if (patch_ctx->mem.gpu_va) {
351 struct tegra_vgpu_cmd_msg msg; 353 struct tegra_vgpu_cmd_msg msg;
352 struct tegra_vgpu_gr_ctx_params *p = &msg.params.gr_ctx; 354 struct tegra_vgpu_gr_ctx_params *p = &msg.params.gr_ctx;
353 int err; 355 int err;
@@ -358,8 +360,9 @@ static void vgpu_gr_free_channel_patch_ctx(struct channel_gk20a *c)
358 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)); 360 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
359 WARN_ON(err || msg.ret); 361 WARN_ON(err || msg.ret);
360 362
361 gk20a_vm_free_va(ch_vm, patch_ctx->gpu_va, patch_ctx->size, 0); 363 gk20a_vm_free_va(ch_vm, patch_ctx->mem.gpu_va,
362 patch_ctx->gpu_va = 0; 364 patch_ctx->mem.size, 0);
365 patch_ctx->mem.gpu_va = 0;
363 } 366 }
364} 367}
365 368
@@ -438,7 +441,7 @@ static int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c,
438 } 441 }
439 442
440 /* allocate patch buffer */ 443 /* allocate patch buffer */
441 if (ch_ctx->patch_ctx.pages == NULL) { 444 if (ch_ctx->patch_ctx.mem.pages == NULL) {
442 err = vgpu_gr_alloc_channel_patch_ctx(g, c); 445 err = vgpu_gr_alloc_channel_patch_ctx(g, c);
443 if (err) { 446 if (err) {
444 gk20a_err(dev_from_gk20a(g), 447 gk20a_err(dev_from_gk20a(g),