author     Richard Zhao <rizhao@nvidia.com>          2016-05-09 18:53:39 -0400
committer  Terje Bergstrom <tbergstrom@nvidia.com>   2016-05-31 13:46:33 -0400
commit     a71ce831fbbca3ba8602e0b07ecd630c4a39f376 (patch)
tree       7f5e1a1e0eea7c2d732c9e558d1161e7a1c59f79 /drivers/gpu/nvgpu/vgpu/gr_vgpu.c
parent     9c4f3799d1318aeb81d23816f8493d115aff2f86 (diff)
gpu: nvgpu: vgpu: manage gr_ctx as independent resource
gr_ctx will be managed as an independent resource in the RM server, and vgpu can get a gr_ctx handle.

Bug 1702773

Change-Id: I87251af61711f0d7997ce90df8a3de196a9b481a
Signed-off-by: Richard Zhao <rizhao@nvidia.com>
Reviewed-on: http://git-master/r/1144931
(cherry picked from commit 2efbd143adaf60570121f1c212dc6b6f3d5a1661)
Reviewed-on: http://git-master/r/1150704
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
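For orientation before the diff, here is a minimal, self-contained sketch of the two-step flow this patch introduces: the client first asks the RM server to allocate a gr_ctx and receives a handle for it as an independent resource, then binds that handle to a channel with a separate command. The enum, struct vmsg, and rm_server_call() below are hypothetical userspace stand-ins for the real transport (struct tegra_vgpu_cmd_msg sent through vgpu_comm_sendrecv()); only the command names and handle fields mirror the patch.

/*
 * Hypothetical model of the alloc-then-bind flow introduced by this
 * patch. The real driver sends struct tegra_vgpu_cmd_msg through
 * vgpu_comm_sendrecv(); everything below is a userspace stand-in.
 */
#include <stdint.h>
#include <stdio.h>

enum vcmd {
	CMD_GR_CTX_ALLOC,        /* models TEGRA_VGPU_CMD_GR_CTX_ALLOC */
	CMD_CHANNEL_BIND_GR_CTX, /* models TEGRA_VGPU_CMD_CHANNEL_BIND_GR_CTX */
};

struct vmsg {
	enum vcmd cmd;
	uint64_t as_handle;     /* address space holding the ctx GPU VA */
	uint64_t gr_ctx_va;     /* GPU VA the client reserved for the ctx */
	uint64_t ch_handle;     /* channel to bind the ctx to */
	uint64_t gr_ctx_handle; /* out on alloc, in on bind */
	int ret;
};

/* Stub "RM server": hands out gr_ctx handles and records bindings. */
static int rm_server_call(struct vmsg *m)
{
	static uint64_t next_handle = 1;

	switch (m->cmd) {
	case CMD_GR_CTX_ALLOC:
		/* The ctx now exists as its own resource, channel-less. */
		m->gr_ctx_handle = next_handle++;
		m->ret = 0;
		break;
	case CMD_CHANNEL_BIND_GR_CTX:
		printf("bind gr_ctx %llu -> channel %llu\n",
		       (unsigned long long)m->gr_ctx_handle,
		       (unsigned long long)m->ch_handle);
		m->ret = 0;
		break;
	}
	return 0; /* a transport error would be nonzero */
}

int main(void)
{
	struct vmsg m = {0};
	int err;

	/* Step 1: allocate the gr_ctx as an independent resource. */
	m.cmd = CMD_GR_CTX_ALLOC;
	m.as_handle = 0x10;     /* made-up handles for the demo */
	m.gr_ctx_va = 0x100000;
	err = rm_server_call(&m);
	err = err ? err : m.ret; /* same error-folding idiom as the patch */
	if (err)
		return 1;

	/* Step 2: bind the returned handle to a channel. */
	m.cmd = CMD_CHANNEL_BIND_GR_CTX;
	m.ch_handle = 0x20;
	err = rm_server_call(&m);
	return (err ? err : m.ret) ? 1 : 0;
}

The design point is that the gr_ctx handle now exists independently of any channel: the server can account for it on its own, and TEGRA_VGPU_CMD_CHANNEL_BIND_GR_CTX attaches it to a channel as a second step.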
Diffstat (limited to 'drivers/gpu/nvgpu/vgpu/gr_vgpu.c')
-rw-r--r--  drivers/gpu/nvgpu/vgpu/gr_vgpu.c | 94
1 file changed, 50 insertions(+), 44 deletions(-)
diff --git a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
index 420c714e..b9490ac9 100644
--- a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
@@ -21,7 +21,7 @@ static int vgpu_gr_commit_inst(struct channel_gk20a *c, u64 gpu_va)
 {
 	struct gk20a_platform *platform = gk20a_get_platform(c->g->dev);
 	struct tegra_vgpu_cmd_msg msg;
-	struct tegra_vgpu_gr_ctx_params *p = &msg.params.gr_ctx;
+	struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx;
 	int err;
 
 	gk20a_dbg_fn("");
@@ -39,7 +39,7 @@ static int vgpu_gr_commit_global_ctx_buffers(struct gk20a *g,
 {
 	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	struct tegra_vgpu_cmd_msg msg;
-	struct tegra_vgpu_gr_ctx_params *p = &msg.params.gr_ctx;
+	struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx;
 	int err;
 
 	gk20a_dbg_fn("");
@@ -58,7 +58,7 @@ static int vgpu_gr_load_golden_ctx_image(struct gk20a *g,
 {
 	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	struct tegra_vgpu_cmd_msg msg;
-	struct tegra_vgpu_gr_ctx_params *p = &msg.params.gr_ctx;
+	struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx;
 	int err;
 
 	gk20a_dbg_fn("");
@@ -130,7 +130,7 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
 {
 	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	struct tegra_vgpu_cmd_msg msg;
-	struct tegra_vgpu_gr_ctx_params *p = &msg.params.gr_ctx;
+	struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx;
 	struct vm_gk20a *ch_vm = c->vm;
 	u64 *g_bfr_va = c->ch_ctx.global_ctx_buffer_va;
 	u64 *g_bfr_size = c->ch_ctx.global_ctx_buffer_size;
@@ -219,7 +219,7 @@ static void vgpu_gr_unmap_global_ctx_buffers(struct channel_gk20a *c)
 
 	if (c->ch_ctx.global_ctx_buffer_mapped) {
 		struct tegra_vgpu_cmd_msg msg;
-		struct tegra_vgpu_gr_ctx_params *p = &msg.params.gr_ctx;
+		struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx;
 		int err;
 
 		msg.cmd = TEGRA_VGPU_CMD_CHANNEL_UNMAP_GR_GLOBAL_CTX;
@@ -246,10 +246,10 @@ int vgpu_gr_alloc_gr_ctx(struct gk20a *g,
 		u32 flags)
 {
 	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
-	struct tegra_vgpu_cmd_msg msg;
+	struct tegra_vgpu_cmd_msg msg = {0};
 	struct tegra_vgpu_gr_ctx_params *p = &msg.params.gr_ctx;
 	struct gr_gk20a *gr = &g->gr;
-	struct gr_ctx_desc *gr_ctx = *__gr_ctx;
+	struct gr_ctx_desc *gr_ctx;
 	int err;
 
 	gk20a_dbg_fn("");
@@ -261,6 +261,10 @@ int vgpu_gr_alloc_gr_ctx(struct gk20a *g,
 	gr->ctx_vars.buffer_size = gr->ctx_vars.golden_image_size;
 	gr->ctx_vars.buffer_total_size = gr->ctx_vars.golden_image_size;
 
+	gr_ctx = kzalloc(sizeof(*gr_ctx), GFP_KERNEL);
+	if (!gr_ctx)
+		return -ENOMEM;
+
 	gr_ctx->mem.size = gr->ctx_vars.buffer_total_size;
 	gr_ctx->mem.gpu_va = gk20a_vm_alloc_va(vm,
 			gr_ctx->mem.size,
@@ -271,49 +275,27 @@ int vgpu_gr_alloc_gr_ctx(struct gk20a *g,
 		return -ENOMEM;
 	}
 
-	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_ALLOC_GR_CTX;
+	msg.cmd = TEGRA_VGPU_CMD_GR_CTX_ALLOC;
 	msg.handle = platform->virt_handle;
-	p->handle = gr_ctx->virt_ctx;
+	p->as_handle = vm->handle;
 	p->gr_ctx_va = gr_ctx->mem.gpu_va;
 	p->class_num = class;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
+	err = err ? err : msg.ret;
 
-	if (err || msg.ret) {
+	if (unlikely(err)) {
+		gk20a_err(dev_from_gk20a(g), "fail to alloc gr_ctx");
 		gk20a_vm_free_va(vm, gr_ctx->mem.gpu_va,
 				gr_ctx->mem.size, 0);
 		kfree(gr_ctx);
-		err = -ENOMEM;
+	} else {
+		gr_ctx->virt_ctx = p->gr_ctx_handle;
+		*__gr_ctx = gr_ctx;
 	}
 
 	return err;
 }
 
-static int vgpu_gr_alloc_channel_gr_ctx(struct gk20a *g,
-			struct channel_gk20a *c,
-			u32 class,
-			u32 flags)
-{
-	struct gr_ctx_desc **gr_ctx = &c->ch_ctx.gr_ctx;
-	struct gr_ctx_desc *__gr_ctx = kzalloc(sizeof(*__gr_ctx), GFP_KERNEL);
-	int err;
-
-	gk20a_dbg_fn("");
-
-	if (!__gr_ctx)
-		return -ENOMEM;
-
-	__gr_ctx->virt_ctx = c->virt_ctx;
-	*gr_ctx = __gr_ctx;
-	err = g->ops.gr.alloc_gr_ctx(g, gr_ctx, c->vm, class, flags);
-	if (err) {
-		kfree(__gr_ctx);
-		return err;
-	}
-
-	c->ch_ctx.gr_ctx = __gr_ctx;
-	return 0;
-}
-
 void vgpu_gr_free_gr_ctx(struct gk20a *g, struct vm_gk20a *vm,
 			struct gr_ctx_desc *gr_ctx)
 {
@@ -325,9 +307,9 @@ void vgpu_gr_free_gr_ctx(struct gk20a *g, struct vm_gk20a *vm,
 		struct tegra_vgpu_gr_ctx_params *p = &msg.params.gr_ctx;
 		int err;
 
-		msg.cmd = TEGRA_VGPU_CMD_CHANNEL_FREE_GR_CTX;
+		msg.cmd = TEGRA_VGPU_CMD_GR_CTX_FREE;
 		msg.handle = platform->virt_handle;
-		p->handle = gr_ctx->virt_ctx;
+		p->gr_ctx_handle = gr_ctx->virt_ctx;
 		err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 		WARN_ON(err || msg.ret);
 
@@ -351,7 +333,7 @@ static int vgpu_gr_alloc_channel_patch_ctx(struct gk20a *g,
 	struct patch_desc *patch_ctx = &c->ch_ctx.patch_ctx;
 	struct vm_gk20a *ch_vm = c->vm;
 	struct tegra_vgpu_cmd_msg msg;
-	struct tegra_vgpu_gr_ctx_params *p = &msg.params.gr_ctx;
+	struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx;
 	int err;
 
 	gk20a_dbg_fn("");
@@ -387,7 +369,7 @@ static void vgpu_gr_free_channel_patch_ctx(struct channel_gk20a *c)
 
 	if (patch_ctx->mem.gpu_va) {
 		struct tegra_vgpu_cmd_msg msg;
-		struct tegra_vgpu_gr_ctx_params *p = &msg.params.gr_ctx;
+		struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx;
 		int err;
 
 		msg.cmd = TEGRA_VGPU_CMD_CHANNEL_FREE_GR_PATCH_CTX;
@@ -443,6 +425,26 @@ static void vgpu_gr_free_channel_ctx(struct channel_gk20a *c)
 	c->first_init = false;
 }
 
+static int vgpu_gr_ch_bind_gr_ctx(struct channel_gk20a *c)
+{
+	struct gk20a_platform *platform = gk20a_get_platform(c->g->dev);
+	struct gr_ctx_desc *gr_ctx = c->ch_ctx.gr_ctx;
+	struct tegra_vgpu_cmd_msg msg = {0};
+	struct tegra_vgpu_channel_bind_gr_ctx_params *p =
+				&msg.params.ch_bind_gr_ctx;
+	int err;
+
+	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_BIND_GR_CTX;
+	msg.handle = platform->virt_handle;
+	p->ch_handle = c->virt_ctx;
+	p->gr_ctx_handle = gr_ctx->virt_ctx;
+	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
+	err = err ? err : msg.ret;
+	WARN_ON(err);
+
+	return err;
+}
+
 static int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c,
 			struct nvgpu_alloc_obj_ctx_args *args)
 {
@@ -476,9 +478,13 @@ static int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c,
 
 	/* allocate gr ctx buffer */
 	if (!ch_ctx->gr_ctx) {
-		err = vgpu_gr_alloc_channel_gr_ctx(g, c,
-						args->class_num,
-						args->flags);
+		err = g->ops.gr.alloc_gr_ctx(g, &c->ch_ctx.gr_ctx,
+						c->vm,
+						args->class_num,
+						args->flags);
+		if (!err)
+			err = vgpu_gr_ch_bind_gr_ctx(c);
+
 		if (err) {
 			gk20a_err(dev_from_gk20a(g),
 				"fail to allocate gr ctx buffer");