path: root/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
author		Terje Bergstrom <tbergstrom@nvidia.com>	2015-03-20 15:59:09 -0400
committer	Dan Willemsen <dwillemsen@nvidia.com>	2015-04-04 22:17:24 -0400
commit		1b7b271980094637cf34a9d8ad14cb36f2c36363 (patch)
tree		a897b5d9a013c14866f938673da6a8d8fe80ef51	/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
parent		21f1396d1c80010470e0f071fabe84279b3aebae (diff)
gpu: nvgpu: Use common allocator for context
Reduce the amount of duplicate code around memory allocation by using
common helpers, and a common data structure for storing the results of
allocations.

Bug 1605769

Change-Id: I10c226e2377aa867a5cf11be61d08a9d67206b1d
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/720507
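For orientation, the change replaces the per-buffer size/gpu_va fields with an embedded allocation descriptor accessed as .mem.size and .mem.gpu_va. The sketch below is only an assumption inferred from those accesses in the hunks that follow; the real descriptor name and full field list are defined elsewhere in the nvgpu tree.

#include <linux/types.h>

/* Hedged sketch: names and fields inferred from the accesses in this diff
 * (.mem.size, .mem.gpu_va); not the authoritative nvgpu definition. */
struct mem_desc_sketch {
	size_t size;	/* allocation size, previously a standalone ->size */
	u64 gpu_va;	/* GPU virtual address, previously a standalone ->gpu_va */
};

struct gr_ctx_desc_sketch {
	struct mem_desc_sketch mem;	/* common allocation record shared by callers */
};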
Diffstat (limited to 'drivers/gpu/nvgpu/vgpu/gr_vgpu.c')
-rw-r--r--	drivers/gpu/nvgpu/vgpu/gr_vgpu.c	45
1 file changed, 23 insertions(+), 22 deletions(-)
diff --git a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
index 60880f6d..fd8bb81b 100644
--- a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
@@ -107,17 +107,17 @@ static int vgpu_gr_alloc_global_ctx_buffers(struct gk20a *g)
 	attr_buffer_size = g->ops.gr.calc_global_ctx_buffer_size(g);
 
 	gk20a_dbg_info("cb_buffer_size : %d", cb_buffer_size);
-	gr->global_ctx_buffer[CIRCULAR].size = cb_buffer_size;
+	gr->global_ctx_buffer[CIRCULAR].mem.size = cb_buffer_size;
 
 	gk20a_dbg_info("pagepool_buffer_size : %d", pagepool_buffer_size);
-	gr->global_ctx_buffer[PAGEPOOL].size = pagepool_buffer_size;
+	gr->global_ctx_buffer[PAGEPOOL].mem.size = pagepool_buffer_size;
 
 	gk20a_dbg_info("attr_buffer_size : %d", attr_buffer_size);
-	gr->global_ctx_buffer[ATTRIBUTE].size = attr_buffer_size;
+	gr->global_ctx_buffer[ATTRIBUTE].mem.size = attr_buffer_size;
 
 	gk20a_dbg_info("priv access map size : %d",
 		gr->ctx_vars.priv_access_map_size);
-	gr->global_ctx_buffer[PRIV_ACCESS_MAP].size =
+	gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem.size =
 		gr->ctx_vars.priv_access_map_size;
 
 	return 0;
@@ -143,38 +143,38 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
 
 	/* Circular Buffer */
 	gpu_va = gk20a_vm_alloc_va(ch_vm,
-			gr->global_ctx_buffer[CIRCULAR].size, 0);
+			gr->global_ctx_buffer[CIRCULAR].mem.size, 0);
 
 	if (!gpu_va)
 		goto clean_up;
 	g_bfr_va[CIRCULAR_VA] = gpu_va;
-	g_bfr_size[CIRCULAR_VA] = gr->global_ctx_buffer[CIRCULAR].size;
+	g_bfr_size[CIRCULAR_VA] = gr->global_ctx_buffer[CIRCULAR].mem.size;
 
 	/* Attribute Buffer */
 	gpu_va = gk20a_vm_alloc_va(ch_vm,
-			gr->global_ctx_buffer[ATTRIBUTE].size, 0);
+			gr->global_ctx_buffer[ATTRIBUTE].mem.size, 0);
 
 	if (!gpu_va)
 		goto clean_up;
 	g_bfr_va[ATTRIBUTE_VA] = gpu_va;
-	g_bfr_size[ATTRIBUTE_VA] = gr->global_ctx_buffer[ATTRIBUTE].size;
+	g_bfr_size[ATTRIBUTE_VA] = gr->global_ctx_buffer[ATTRIBUTE].mem.size;
 
 	/* Page Pool */
 	gpu_va = gk20a_vm_alloc_va(ch_vm,
-			gr->global_ctx_buffer[PAGEPOOL].size, 0);
+			gr->global_ctx_buffer[PAGEPOOL].mem.size, 0);
 	if (!gpu_va)
 		goto clean_up;
 	g_bfr_va[PAGEPOOL_VA] = gpu_va;
-	g_bfr_size[PAGEPOOL_VA] = gr->global_ctx_buffer[PAGEPOOL].size;
+	g_bfr_size[PAGEPOOL_VA] = gr->global_ctx_buffer[PAGEPOOL].mem.size;
 
 	/* Priv register Access Map */
 	gpu_va = gk20a_vm_alloc_va(ch_vm,
-			gr->global_ctx_buffer[PRIV_ACCESS_MAP].size, 0);
+			gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem.size, 0);
 	if (!gpu_va)
 		goto clean_up;
 	g_bfr_va[PRIV_ACCESS_MAP_VA] = gpu_va;
 	g_bfr_size[PRIV_ACCESS_MAP_VA] =
-		gr->global_ctx_buffer[PRIV_ACCESS_MAP].size;
+		gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem.size;
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_MAP_GR_GLOBAL_CTX;
 	msg.handle = platform->virt_handle;
@@ -257,10 +257,10 @@ static int vgpu_gr_alloc_channel_gr_ctx(struct gk20a *g,
 	if (!gr_ctx)
 		return -ENOMEM;
 
-	gr_ctx->size = gr->ctx_vars.buffer_total_size;
-	gr_ctx->gpu_va = gk20a_vm_alloc_va(ch_vm, gr_ctx->size, 0);
+	gr_ctx->mem.size = gr->ctx_vars.buffer_total_size;
+	gr_ctx->mem.gpu_va = gk20a_vm_alloc_va(ch_vm, gr_ctx->mem.size, 0);
 
-	if (!gr_ctx->gpu_va) {
+	if (!gr_ctx->mem.gpu_va) {
 		kfree(gr_ctx);
 		return -ENOMEM;
 	}
@@ -268,13 +268,14 @@ static int vgpu_gr_alloc_channel_gr_ctx(struct gk20a *g,
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_ALLOC_GR_CTX;
 	msg.handle = platform->virt_handle;
 	p->handle = c->virt_ctx;
-	p->gr_ctx_va = gr_ctx->gpu_va;
+	p->gr_ctx_va = gr_ctx->mem.gpu_va;
 	p->class_num = c->obj_class;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 
 	if (err || msg.ret) {
 		kfree(gr_ctx);
-		gk20a_vm_free_va(ch_vm, gr_ctx->gpu_va, gr_ctx->size, 0);
+		gk20a_vm_free_va(ch_vm, gr_ctx->mem.gpu_va,
+				gr_ctx->mem.size, 0);
 		err = -ENOMEM;
 	} else
 		c->ch_ctx.gr_ctx = gr_ctx;
@@ -290,7 +291,7 @@ static void vgpu_gr_free_channel_gr_ctx(struct channel_gk20a *c)
 
 	gk20a_dbg_fn("");
 
-	if (ch_ctx->gr_ctx && ch_ctx->gr_ctx->gpu_va) {
+	if (ch_ctx->gr_ctx && ch_ctx->gr_ctx->mem.gpu_va) {
 		struct tegra_vgpu_cmd_msg msg;
 		struct tegra_vgpu_gr_ctx_params *p = &msg.params.gr_ctx;
 		int err;
@@ -301,9 +302,9 @@ static void vgpu_gr_free_channel_gr_ctx(struct channel_gk20a *c)
 		err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 		WARN_ON(err || msg.ret);
 
-		gk20a_vm_free_va(ch_vm, ch_ctx->gr_ctx->gpu_va,
-			ch_ctx->gr_ctx->size, 0);
-		ch_ctx->gr_ctx->gpu_va = 0;
+		gk20a_vm_free_va(ch_vm, ch_ctx->gr_ctx->mem.gpu_va,
+			ch_ctx->gr_ctx->mem.size, 0);
+		ch_ctx->gr_ctx->mem.gpu_va = 0;
 		kfree(ch_ctx->gr_ctx);
 	}
 }
@@ -429,7 +430,7 @@ static int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c,
 	}
 
 	/* commit gr ctx buffer */
-	err = vgpu_gr_commit_inst(c, ch_ctx->gr_ctx->gpu_va);
+	err = vgpu_gr_commit_inst(c, ch_ctx->gr_ctx->mem.gpu_va);
 	if (err) {
 		gk20a_err(dev_from_gk20a(g),
 			"fail to commit gr ctx buffer");