From 1b7b271980094637cf34a9d8ad14cb36f2c36363 Mon Sep 17 00:00:00 2001
From: Terje Bergstrom
Date: Fri, 20 Mar 2015 12:59:09 -0700
Subject: gpu: nvgpu: Use common allocator for context

Reduce amount of duplicate code around memory allocation by using
common helpers, and common data structure for storing results of
allocations.

Bug 1605769

Change-Id: I10c226e2377aa867a5cf11be61d08a9d67206b1d
Signed-off-by: Terje Bergstrom
Reviewed-on: http://git-master/r/720507
---
 drivers/gpu/nvgpu/vgpu/gr_vgpu.c | 45 +++++++++++++++++++++++----------------------
 1 file changed, 23 insertions(+), 22 deletions(-)

(limited to 'drivers/gpu/nvgpu/vgpu/gr_vgpu.c')

diff --git a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
index 60880f6d..fd8bb81b 100644
--- a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
@@ -107,17 +107,17 @@ static int vgpu_gr_alloc_global_ctx_buffers(struct gk20a *g)
 	attr_buffer_size = g->ops.gr.calc_global_ctx_buffer_size(g);
 
 	gk20a_dbg_info("cb_buffer_size : %d", cb_buffer_size);
-	gr->global_ctx_buffer[CIRCULAR].size = cb_buffer_size;
+	gr->global_ctx_buffer[CIRCULAR].mem.size = cb_buffer_size;
 
 	gk20a_dbg_info("pagepool_buffer_size : %d", pagepool_buffer_size);
-	gr->global_ctx_buffer[PAGEPOOL].size = pagepool_buffer_size;
+	gr->global_ctx_buffer[PAGEPOOL].mem.size = pagepool_buffer_size;
 
 	gk20a_dbg_info("attr_buffer_size : %d", attr_buffer_size);
-	gr->global_ctx_buffer[ATTRIBUTE].size = attr_buffer_size;
+	gr->global_ctx_buffer[ATTRIBUTE].mem.size = attr_buffer_size;
 
 	gk20a_dbg_info("priv access map size : %d",
 		gr->ctx_vars.priv_access_map_size);
-	gr->global_ctx_buffer[PRIV_ACCESS_MAP].size =
+	gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem.size =
 		gr->ctx_vars.priv_access_map_size;
 
 	return 0;
@@ -143,38 +143,38 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
 
 	/* Circular Buffer */
 	gpu_va = gk20a_vm_alloc_va(ch_vm,
-			gr->global_ctx_buffer[CIRCULAR].size, 0);
+			gr->global_ctx_buffer[CIRCULAR].mem.size, 0);
 
 	if (!gpu_va)
 		goto clean_up;
 	g_bfr_va[CIRCULAR_VA] = gpu_va;
-	g_bfr_size[CIRCULAR_VA] = gr->global_ctx_buffer[CIRCULAR].size;
+	g_bfr_size[CIRCULAR_VA] = gr->global_ctx_buffer[CIRCULAR].mem.size;
 
 	/* Attribute Buffer */
 	gpu_va = gk20a_vm_alloc_va(ch_vm,
-			gr->global_ctx_buffer[ATTRIBUTE].size, 0);
+			gr->global_ctx_buffer[ATTRIBUTE].mem.size, 0);
 
 	if (!gpu_va)
 		goto clean_up;
 	g_bfr_va[ATTRIBUTE_VA] = gpu_va;
-	g_bfr_size[ATTRIBUTE_VA] = gr->global_ctx_buffer[ATTRIBUTE].size;
+	g_bfr_size[ATTRIBUTE_VA] = gr->global_ctx_buffer[ATTRIBUTE].mem.size;
 
 	/* Page Pool */
 	gpu_va = gk20a_vm_alloc_va(ch_vm,
-			gr->global_ctx_buffer[PAGEPOOL].size, 0);
+			gr->global_ctx_buffer[PAGEPOOL].mem.size, 0);
 
 	if (!gpu_va)
 		goto clean_up;
 	g_bfr_va[PAGEPOOL_VA] = gpu_va;
-	g_bfr_size[PAGEPOOL_VA] = gr->global_ctx_buffer[PAGEPOOL].size;
+	g_bfr_size[PAGEPOOL_VA] = gr->global_ctx_buffer[PAGEPOOL].mem.size;
 
 	/* Priv register Access Map */
 	gpu_va = gk20a_vm_alloc_va(ch_vm,
-			gr->global_ctx_buffer[PRIV_ACCESS_MAP].size, 0);
+			gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem.size, 0);
 
 	if (!gpu_va)
 		goto clean_up;
 	g_bfr_va[PRIV_ACCESS_MAP_VA] = gpu_va;
 	g_bfr_size[PRIV_ACCESS_MAP_VA] =
-		gr->global_ctx_buffer[PRIV_ACCESS_MAP].size;
+		gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem.size;
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_MAP_GR_GLOBAL_CTX;
 	msg.handle = platform->virt_handle;
@@ -257,10 +257,10 @@ static int vgpu_gr_alloc_channel_gr_ctx(struct gk20a *g,
 	if (!gr_ctx)
 		return -ENOMEM;
 
-	gr_ctx->size = gr->ctx_vars.buffer_total_size;
-	gr_ctx->gpu_va = gk20a_vm_alloc_va(ch_vm, gr_ctx->size, 0);
+	gr_ctx->mem.size = gr->ctx_vars.buffer_total_size;
+	gr_ctx->mem.gpu_va = gk20a_vm_alloc_va(ch_vm, gr_ctx->mem.size, 0);
 
-	if (!gr_ctx->gpu_va) {
+	if (!gr_ctx->mem.gpu_va) {
 		kfree(gr_ctx);
 		return -ENOMEM;
 	}
@@ -268,13 +268,14 @@ static int vgpu_gr_alloc_channel_gr_ctx(struct gk20a *g,
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_ALLOC_GR_CTX;
 	msg.handle = platform->virt_handle;
 	p->handle = c->virt_ctx;
-	p->gr_ctx_va = gr_ctx->gpu_va;
+	p->gr_ctx_va = gr_ctx->mem.gpu_va;
 	p->class_num = c->obj_class;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 
 	if (err || msg.ret) {
 		kfree(gr_ctx);
-		gk20a_vm_free_va(ch_vm, gr_ctx->gpu_va, gr_ctx->size, 0);
+		gk20a_vm_free_va(ch_vm, gr_ctx->mem.gpu_va,
+				gr_ctx->mem.size, 0);
 		err = -ENOMEM;
 	} else
 		c->ch_ctx.gr_ctx = gr_ctx;
@@ -290,7 +291,7 @@ static void vgpu_gr_free_channel_gr_ctx(struct channel_gk20a *c)
 
 	gk20a_dbg_fn("");
 
-	if (ch_ctx->gr_ctx && ch_ctx->gr_ctx->gpu_va) {
+	if (ch_ctx->gr_ctx && ch_ctx->gr_ctx->mem.gpu_va) {
 		struct tegra_vgpu_cmd_msg msg;
 		struct tegra_vgpu_gr_ctx_params *p = &msg.params.gr_ctx;
 		int err;
@@ -301,9 +302,9 @@ static void vgpu_gr_free_channel_gr_ctx(struct channel_gk20a *c)
 		err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 		WARN_ON(err || msg.ret);
 
-		gk20a_vm_free_va(ch_vm, ch_ctx->gr_ctx->gpu_va,
-				ch_ctx->gr_ctx->size, 0);
-		ch_ctx->gr_ctx->gpu_va = 0;
+		gk20a_vm_free_va(ch_vm, ch_ctx->gr_ctx->mem.gpu_va,
+				ch_ctx->gr_ctx->mem.size, 0);
+		ch_ctx->gr_ctx->mem.gpu_va = 0;
 		kfree(ch_ctx->gr_ctx);
 	}
 }
@@ -429,7 +430,7 @@ static int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c,
 	}
 
 	/* commit gr ctx buffer */
-	err = vgpu_gr_commit_inst(c, ch_ctx->gr_ctx->gpu_va);
+	err = vgpu_gr_commit_inst(c, ch_ctx->gr_ctx->mem.gpu_va);
 	if (err) {
 		gk20a_err(dev_from_gk20a(g),
 			"fail to commit gr ctx buffer");
-- 
cgit v1.2.2
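
The shape of the change is easy to see outside the driver tree. The
standalone C sketch below mirrors vgpu_gr_alloc_channel_gr_ctx() after
this patch: the allocation result lives in an embedded common structure
rather than in per-user size/gpu_va fields. Only the two fields this
diff touches are shown; the real struct mem_desc in the gk20a headers
is richer, and the stub allocator here is a hypothetical stand-in, not
driver code.

#include <stdlib.h>

typedef unsigned long long u64;

/* Common record for one allocation's results; shows only the two
 * fields this diff touches (the driver's mem_desc has more). */
struct mem_desc {
	u64 gpu_va;	/* GPU virtual address, 0 when unmapped */
	size_t size;	/* allocation size in bytes */
};

/* Per-channel gr context descriptor: embeds the common record
 * instead of declaring its own size/gpu_va pair. */
struct gr_ctx_desc {
	struct mem_desc mem;
};

/* Stand-in for gk20a_vm_alloc_va(); returns a fake GPU VA or 0. */
static u64 stub_vm_alloc_va(size_t size)
{
	return size ? 0x100000000ull : 0;
}

/* Same control flow as vgpu_gr_alloc_channel_gr_ctx() after the
 * patch: size and VA are recorded through the embedded mem field. */
static struct gr_ctx_desc *alloc_gr_ctx(size_t buffer_total_size)
{
	struct gr_ctx_desc *gr_ctx = calloc(1, sizeof(*gr_ctx));

	if (!gr_ctx)
		return NULL;

	gr_ctx->mem.size = buffer_total_size;
	gr_ctx->mem.gpu_va = stub_vm_alloc_va(gr_ctx->mem.size);
	if (!gr_ctx->mem.gpu_va) {
		free(gr_ctx);
		return NULL;
	}
	return gr_ctx;
}

The payoff of the refactor is that every allocation site now hands back
the same record, so common helpers can operate on a struct mem_desc
regardless of which context buffer it describes, which is what lets the
duplicated bookkeeping in the functions above collapse.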