author      Richard Zhao <rizhao@nvidia.com>                       2016-07-21 19:51:40 -0400
committer   mobile promotions <svcmobile_promotions@nvidia.com>   2016-08-15 14:41:16 -0400
commit      e1438818b90c5b0d73aae800b12bd6b36aec5142 (patch)
tree        f0582cda23552526c3067e90f4cb74b461d50d73 /drivers/gpu/nvgpu/vgpu/gr_vgpu.c
parent      33ff34887f560449828e79170a2a36a97496eeec (diff)
gpu: nvgpu: vgpu: add vgpu private data and helper functions
Move vgpu private data to a dedicated structure and allocate it at
probe time. Also add the virt_handle helper function, which is used
everywhere.

JIRA VFND-2103

Change-Id: I125911420be72ca9be948125d8357fa85d1d3afd
Signed-off-by: Richard Zhao <rizhao@nvidia.com>
Reviewed-on: http://git-master/r/1185206
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vladislav Buzov <vbuzov@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/vgpu/gr_vgpu.c')
-rw-r--r--    drivers/gpu/nvgpu/vgpu/gr_vgpu.c    89
1 file changed, 31 insertions(+), 58 deletions(-)
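Before the diff itself, a note on what the new helper replaces. Every hunk below follows the same pattern: drop the local gk20a_platform lookup and read the handle through vgpu_get_handle() instead. The helper and the dedicated private-data structure are defined in the vgpu headers, outside this file; the following is only a minimal sketch of what they might look like (the vgpu_priv field name and the exact struct layout are assumptions, not taken from this diff):

struct vgpu_priv_data {
        u64 virt_handle;        /* handle used to address the vgpu server */
};

static inline struct vgpu_priv_data *vgpu_get_priv_data(struct gk20a *g)
{
        /* Private data hangs off the platform struct, set up at probe time.
         * The vgpu_priv member is an assumed name for illustration. */
        return (struct vgpu_priv_data *)gk20a_get_platform(g->dev)->vgpu_priv;
}

static inline u64 vgpu_get_handle(struct gk20a *g)
{
        return vgpu_get_priv_data(g)->virt_handle;
}

This turns every call site from a two-step lookup (fetch the platform, then read platform->virt_handle) into a single expression, which is the mechanical change repeated through the hunks below.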
diff --git a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
index f395ac1e..5477bca0 100644
--- a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
@@ -19,7 +19,6 @@
 
 static int vgpu_gr_commit_inst(struct channel_gk20a *c, u64 gpu_va)
 {
-        struct gk20a_platform *platform = gk20a_get_platform(c->g->dev);
         struct tegra_vgpu_cmd_msg msg;
         struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx;
         int err;
@@ -27,7 +26,7 @@ static int vgpu_gr_commit_inst(struct channel_gk20a *c, u64 gpu_va)
         gk20a_dbg_fn("");
 
         msg.cmd = TEGRA_VGPU_CMD_CHANNEL_COMMIT_GR_CTX;
-        msg.handle = platform->virt_handle;
+        msg.handle = vgpu_get_handle(c->g);
         p->handle = c->virt_ctx;
         err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 
@@ -37,7 +36,6 @@ static int vgpu_gr_commit_inst(struct channel_gk20a *c, u64 gpu_va)
 static int vgpu_gr_commit_global_ctx_buffers(struct gk20a *g,
                                         struct channel_gk20a *c, bool patch)
 {
-        struct gk20a_platform *platform = gk20a_get_platform(g->dev);
         struct tegra_vgpu_cmd_msg msg;
         struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx;
         int err;
@@ -45,7 +43,7 @@ static int vgpu_gr_commit_global_ctx_buffers(struct gk20a *g,
         gk20a_dbg_fn("");
 
         msg.cmd = TEGRA_VGPU_CMD_CHANNEL_COMMIT_GR_GLOBAL_CTX;
-        msg.handle = platform->virt_handle;
+        msg.handle = vgpu_get_handle(g);
         p->handle = c->virt_ctx;
         err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 
@@ -56,7 +54,6 @@ static int vgpu_gr_commit_global_ctx_buffers(struct gk20a *g,
 static int vgpu_gr_load_golden_ctx_image(struct gk20a *g,
                                         struct channel_gk20a *c)
 {
-        struct gk20a_platform *platform = gk20a_get_platform(g->dev);
         struct tegra_vgpu_cmd_msg msg;
         struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx;
         int err;
@@ -64,7 +61,7 @@ static int vgpu_gr_load_golden_ctx_image(struct gk20a *g,
         gk20a_dbg_fn("");
 
         msg.cmd = TEGRA_VGPU_CMD_CHANNEL_LOAD_GR_GOLDEN_CTX;
-        msg.handle = platform->virt_handle;
+        msg.handle = vgpu_get_handle(g);
         p->handle = c->virt_ctx;
         err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 
@@ -73,15 +70,14 @@ static int vgpu_gr_load_golden_ctx_image(struct gk20a *g,
 
 int vgpu_gr_init_ctx_state(struct gk20a *g)
 {
-        struct gk20a_platform *platform = gk20a_get_platform(g->dev);
         struct gr_gk20a *gr = &g->gr;
 
         gk20a_dbg_fn("");
 
-        vgpu_get_attribute(platform->virt_handle,
+        vgpu_get_attribute(vgpu_get_handle(g),
                         TEGRA_VGPU_ATTRIB_GOLDEN_CTX_SIZE,
                         &g->gr.ctx_vars.golden_image_size);
-        vgpu_get_attribute(platform->virt_handle,
+        vgpu_get_attribute(vgpu_get_handle(g),
                         TEGRA_VGPU_ATTRIB_ZCULL_CTX_SIZE,
                         &g->gr.ctx_vars.zcull_ctxsw_image_size);
         if (!g->gr.ctx_vars.golden_image_size ||
@@ -128,7 +124,6 @@ static int vgpu_gr_alloc_global_ctx_buffers(struct gk20a *g)
 static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
                                         struct channel_gk20a *c)
 {
-        struct gk20a_platform *platform = gk20a_get_platform(g->dev);
         struct tegra_vgpu_cmd_msg msg;
         struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx;
         struct vm_gk20a *ch_vm = c->vm;
@@ -183,7 +178,7 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
                         gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem.size;
 
         msg.cmd = TEGRA_VGPU_CMD_CHANNEL_MAP_GR_GLOBAL_CTX;
-        msg.handle = platform->virt_handle;
+        msg.handle = vgpu_get_handle(g);
         p->handle = c->virt_ctx;
         p->cb_va = g_bfr_va[CIRCULAR_VA];
         p->attr_va = g_bfr_va[ATTRIBUTE_VA];
@@ -209,7 +204,6 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
 
 static void vgpu_gr_unmap_global_ctx_buffers(struct channel_gk20a *c)
 {
-        struct gk20a_platform *platform = gk20a_get_platform(c->g->dev);
         struct vm_gk20a *ch_vm = c->vm;
         u64 *g_bfr_va = c->ch_ctx.global_ctx_buffer_va;
         u64 *g_bfr_size = c->ch_ctx.global_ctx_buffer_size;
@@ -223,7 +217,7 @@ static void vgpu_gr_unmap_global_ctx_buffers(struct channel_gk20a *c)
                 int err;
 
                 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_UNMAP_GR_GLOBAL_CTX;
-                msg.handle = platform->virt_handle;
+                msg.handle = vgpu_get_handle(c->g);
                 p->handle = c->virt_ctx;
                 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
                 WARN_ON(err || msg.ret);
@@ -245,7 +239,6 @@ int vgpu_gr_alloc_gr_ctx(struct gk20a *g,
                         u32 class,
                         u32 flags)
 {
-        struct gk20a_platform *platform = gk20a_get_platform(g->dev);
         struct tegra_vgpu_cmd_msg msg = {0};
         struct tegra_vgpu_gr_ctx_params *p = &msg.params.gr_ctx;
         struct gr_gk20a *gr = &g->gr;
@@ -276,7 +269,7 @@ int vgpu_gr_alloc_gr_ctx(struct gk20a *g,
         }
 
         msg.cmd = TEGRA_VGPU_CMD_GR_CTX_ALLOC;
-        msg.handle = platform->virt_handle;
+        msg.handle = vgpu_get_handle(g);
         p->as_handle = vm->handle;
         p->gr_ctx_va = gr_ctx->mem.gpu_va;
         p->class_num = class;
@@ -302,13 +295,12 @@ void vgpu_gr_free_gr_ctx(struct gk20a *g, struct vm_gk20a *vm,
         gk20a_dbg_fn("");
 
         if (gr_ctx && gr_ctx->mem.gpu_va) {
-                struct gk20a_platform *platform = gk20a_get_platform(g->dev);
                 struct tegra_vgpu_cmd_msg msg;
                 struct tegra_vgpu_gr_ctx_params *p = &msg.params.gr_ctx;
                 int err;
 
                 msg.cmd = TEGRA_VGPU_CMD_GR_CTX_FREE;
-                msg.handle = platform->virt_handle;
+                msg.handle = vgpu_get_handle(g);
                 p->gr_ctx_handle = gr_ctx->virt_ctx;
                 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
                 WARN_ON(err || msg.ret);
@@ -329,7 +321,6 @@ static void vgpu_gr_free_channel_gr_ctx(struct channel_gk20a *c)
 static int vgpu_gr_alloc_channel_patch_ctx(struct gk20a *g,
                                         struct channel_gk20a *c)
 {
-        struct gk20a_platform *platform = gk20a_get_platform(g->dev);
         struct patch_desc *patch_ctx = &c->ch_ctx.patch_ctx;
         struct vm_gk20a *ch_vm = c->vm;
         struct tegra_vgpu_cmd_msg msg;
@@ -346,7 +337,7 @@ static int vgpu_gr_alloc_channel_patch_ctx(struct gk20a *g,
                 return -ENOMEM;
 
         msg.cmd = TEGRA_VGPU_CMD_CHANNEL_ALLOC_GR_PATCH_CTX;
-        msg.handle = platform->virt_handle;
+        msg.handle = vgpu_get_handle(g);
         p->handle = c->virt_ctx;
         p->patch_ctx_va = patch_ctx->mem.gpu_va;
         err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
@@ -361,7 +352,6 @@ static int vgpu_gr_alloc_channel_patch_ctx(struct gk20a *g,
 
 static void vgpu_gr_free_channel_patch_ctx(struct channel_gk20a *c)
 {
-        struct gk20a_platform *platform = gk20a_get_platform(c->g->dev);
         struct patch_desc *patch_ctx = &c->ch_ctx.patch_ctx;
         struct vm_gk20a *ch_vm = c->vm;
 
@@ -373,7 +363,7 @@ static void vgpu_gr_free_channel_patch_ctx(struct channel_gk20a *c)
                 int err;
 
                 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_FREE_GR_PATCH_CTX;
-                msg.handle = platform->virt_handle;
+                msg.handle = vgpu_get_handle(c->g);
                 p->handle = c->virt_ctx;
                 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
                 WARN_ON(err || msg.ret);
@@ -386,7 +376,6 @@ static void vgpu_gr_free_channel_patch_ctx(struct channel_gk20a *c)
 
 static void vgpu_gr_free_channel_pm_ctx(struct channel_gk20a *c)
 {
-        struct gk20a_platform *platform = gk20a_get_platform(c->g->dev);
         struct tegra_vgpu_cmd_msg msg;
         struct tegra_vgpu_channel_free_hwpm_ctx *p = &msg.params.free_hwpm_ctx;
         struct channel_ctx_gk20a *ch_ctx = &c->ch_ctx;
@@ -399,7 +388,7 @@ static void vgpu_gr_free_channel_pm_ctx(struct channel_gk20a *c)
                 return;
 
         msg.cmd = TEGRA_VGPU_CMD_CHANNEL_FREE_HWPM_CTX;
-        msg.handle = platform->virt_handle;
+        msg.handle = vgpu_get_handle(c->g);
         p->handle = c->virt_ctx;
         err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
         WARN_ON(err || msg.ret);
@@ -427,7 +416,6 @@ static void vgpu_gr_free_channel_ctx(struct channel_gk20a *c)
 
 static int vgpu_gr_ch_bind_gr_ctx(struct channel_gk20a *c)
 {
-        struct gk20a_platform *platform = gk20a_get_platform(c->g->dev);
         struct gr_ctx_desc *gr_ctx = c->ch_ctx.gr_ctx;
         struct tegra_vgpu_cmd_msg msg = {0};
         struct tegra_vgpu_channel_bind_gr_ctx_params *p =
@@ -435,7 +423,7 @@ static int vgpu_gr_ch_bind_gr_ctx(struct channel_gk20a *c)
         int err;
 
         msg.cmd = TEGRA_VGPU_CMD_CHANNEL_BIND_GR_CTX;
-        msg.handle = platform->virt_handle;
+        msg.handle = vgpu_get_handle(c->g);
         p->ch_handle = c->virt_ctx;
         p->gr_ctx_handle = gr_ctx->virt_ctx;
         err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
@@ -447,7 +435,6 @@ static int vgpu_gr_ch_bind_gr_ctx(struct channel_gk20a *c)
 
 static int vgpu_gr_tsg_bind_gr_ctx(struct tsg_gk20a *tsg)
 {
-        struct gk20a_platform *platform = gk20a_get_platform(tsg->g->dev);
         struct gr_ctx_desc *gr_ctx = tsg->tsg_gr_ctx;
         struct tegra_vgpu_cmd_msg msg = {0};
         struct tegra_vgpu_tsg_bind_gr_ctx_params *p =
@@ -455,7 +442,7 @@ static int vgpu_gr_tsg_bind_gr_ctx(struct tsg_gk20a *tsg)
         int err;
 
         msg.cmd = TEGRA_VGPU_CMD_TSG_BIND_GR_CTX;
-        msg.handle = platform->virt_handle;
+        msg.handle = vgpu_get_handle(tsg->g);
         p->tsg_id = tsg->tsgid;
         p->gr_ctx_handle = gr_ctx->virt_ctx;
         err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
@@ -621,12 +608,11 @@ static int vgpu_gr_free_obj_ctx(struct channel_gk20a *c,
 
 static u32 vgpu_gr_get_gpc_tpc_count(struct gk20a *g, u32 gpc_index)
 {
-        struct gk20a_platform *platform = gk20a_get_platform(g->dev);
         u32 data;
 
         WARN_ON(gpc_index > 0);
 
-        if (vgpu_get_attribute(platform->virt_handle,
+        if (vgpu_get_attribute(vgpu_get_handle(g),
                         TEGRA_VGPU_ATTRIB_GPC0_TPC_COUNT, &data))
                 gk20a_err(dev_from_gk20a(g), "failed to retrieve gpc0_tpc_count");
         return data;
@@ -634,26 +620,25 @@ static u32 vgpu_gr_get_gpc_tpc_count(struct gk20a *g, u32 gpc_index)
 
 static int vgpu_gr_init_gr_config(struct gk20a *g, struct gr_gk20a *gr)
 {
-        struct gk20a_platform *platform = gk20a_get_platform(g->dev);
         u32 gpc_index;
 
         gk20a_dbg_fn("");
 
-        if (vgpu_get_attribute(platform->virt_handle,
+        if (vgpu_get_attribute(vgpu_get_handle(g),
                         TEGRA_VGPU_ATTRIB_GPC_COUNT, &gr->gpc_count))
                 return -ENOMEM;
 
-        if (vgpu_get_attribute(platform->virt_handle,
+        if (vgpu_get_attribute(vgpu_get_handle(g),
                         TEGRA_VGPU_ATTRIB_MAX_TPC_PER_GPC_COUNT,
                         &gr->max_tpc_per_gpc_count))
                 return -ENOMEM;
 
-        if (vgpu_get_attribute(platform->virt_handle,
+        if (vgpu_get_attribute(vgpu_get_handle(g),
                         TEGRA_VGPU_ATTRIB_MAX_TPC_COUNT,
                         &gr->max_tpc_count))
                 return -ENOMEM;
 
-        if (vgpu_get_attribute(platform->virt_handle,
+        if (vgpu_get_attribute(vgpu_get_handle(g),
                         TEGRA_VGPU_ATTRIB_TPC_COUNT,
                         &gr->tpc_count))
                 return -ENOMEM;
@@ -701,7 +686,6 @@ static int vgpu_gr_bind_ctxsw_zcull(struct gk20a *g, struct gr_gk20a *gr,
                         struct channel_gk20a *c, u64 zcull_va,
                         u32 mode)
 {
-        struct gk20a_platform *platform = gk20a_get_platform(g->dev);
         struct tegra_vgpu_cmd_msg msg;
         struct tegra_vgpu_zcull_bind_params *p = &msg.params.zcull_bind;
         int err;
@@ -709,7 +693,7 @@ static int vgpu_gr_bind_ctxsw_zcull(struct gk20a *g, struct gr_gk20a *gr,
         gk20a_dbg_fn("");
 
         msg.cmd = TEGRA_VGPU_CMD_CHANNEL_BIND_ZCULL;
-        msg.handle = platform->virt_handle;
+        msg.handle = vgpu_get_handle(g);
         p->handle = c->virt_ctx;
         p->zcull_va = zcull_va;
         p->mode = mode;
@@ -721,7 +705,6 @@ static int vgpu_gr_bind_ctxsw_zcull(struct gk20a *g, struct gr_gk20a *gr,
 static int vgpu_gr_get_zcull_info(struct gk20a *g, struct gr_gk20a *gr,
                                 struct gr_zcull_info *zcull_params)
 {
-        struct gk20a_platform *platform = gk20a_get_platform(g->dev);
         struct tegra_vgpu_cmd_msg msg;
         struct tegra_vgpu_zcull_info_params *p = &msg.params.zcull_info;
         int err;
@@ -729,7 +712,7 @@ static int vgpu_gr_get_zcull_info(struct gk20a *g, struct gr_gk20a *gr,
         gk20a_dbg_fn("");
 
         msg.cmd = TEGRA_VGPU_CMD_GET_ZCULL_INFO;
-        msg.handle = platform->virt_handle;
+        msg.handle = vgpu_get_handle(g);
         err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
         if (err || msg.ret)
                 return -ENOMEM;
@@ -752,12 +735,11 @@ static int vgpu_gr_get_zcull_info(struct gk20a *g, struct gr_gk20a *gr,
 
 static u32 vgpu_gr_get_gpc_tpc_mask(struct gk20a *g, u32 gpc_index)
 {
-        struct gk20a_platform *platform = gk20a_get_platform(g->dev);
         u32 data;
 
         WARN_ON(gpc_index > 0);
 
-        if (vgpu_get_attribute(platform->virt_handle,
+        if (vgpu_get_attribute(vgpu_get_handle(g),
                         TEGRA_VGPU_ATTRIB_GPC0_TPC_MASK, &data))
                 gk20a_err(dev_from_gk20a(g), "failed to retrieve gpc0_tpc_mask");
 
@@ -766,12 +748,11 @@ static u32 vgpu_gr_get_gpc_tpc_mask(struct gk20a *g, u32 gpc_index)
 
 static u32 vgpu_gr_get_max_fbps_count(struct gk20a *g)
 {
-        struct gk20a_platform *platform = gk20a_get_platform(g->dev);
         u32 max_fbps_count = 0;
 
         gk20a_dbg_fn("");
 
-        if (vgpu_get_attribute(platform->virt_handle,
+        if (vgpu_get_attribute(vgpu_get_handle(g),
                         TEGRA_VGPU_ATTRIB_NUM_FBPS, &max_fbps_count))
                 gk20a_err(dev_from_gk20a(g), "failed to retrieve num fbps");
 
@@ -780,12 +761,11 @@ static u32 vgpu_gr_get_max_fbps_count(struct gk20a *g)
 
 static u32 vgpu_gr_get_fbp_en_mask(struct gk20a *g)
 {
-        struct gk20a_platform *platform = gk20a_get_platform(g->dev);
         u32 fbp_en_mask = 0;
 
         gk20a_dbg_fn("");
 
-        if (vgpu_get_attribute(platform->virt_handle,
+        if (vgpu_get_attribute(vgpu_get_handle(g),
                         TEGRA_VGPU_ATTRIB_FBP_EN_MASK, &fbp_en_mask))
                 gk20a_err(dev_from_gk20a(g), "failed to retrieve fbp en mask");
 
@@ -794,12 +774,11 @@ static u32 vgpu_gr_get_fbp_en_mask(struct gk20a *g)
 
 static u32 vgpu_gr_get_max_ltc_per_fbp(struct gk20a *g)
 {
-        struct gk20a_platform *platform = gk20a_get_platform(g->dev);
         u32 val = 0;
 
         gk20a_dbg_fn("");
 
-        if (vgpu_get_attribute(platform->virt_handle,
+        if (vgpu_get_attribute(vgpu_get_handle(g),
                         TEGRA_VGPU_ATTRIB_MAX_LTC_PER_FBP, &val))
                 gk20a_err(dev_from_gk20a(g), "failed to retrieve max ltc per fbp");
 
@@ -808,12 +787,11 @@ static u32 vgpu_gr_get_max_ltc_per_fbp(struct gk20a *g)
 
 static u32 vgpu_gr_get_max_lts_per_ltc(struct gk20a *g)
 {
-        struct gk20a_platform *platform = gk20a_get_platform(g->dev);
         u32 val = 0;
 
         gk20a_dbg_fn("");
 
-        if (vgpu_get_attribute(platform->virt_handle,
+        if (vgpu_get_attribute(vgpu_get_handle(g),
                         TEGRA_VGPU_ATTRIB_MAX_LTS_PER_LTC, &val))
                 gk20a_err(dev_from_gk20a(g), "failed to retrieve lts per ltc");
 
@@ -829,7 +807,6 @@ static u32 *vgpu_gr_rop_l2_en_mask(struct gk20a *g)
 static int vgpu_gr_add_zbc(struct gk20a *g, struct gr_gk20a *gr,
                         struct zbc_entry *zbc_val)
 {
-        struct gk20a_platform *platform = gk20a_get_platform(g->dev);
         struct tegra_vgpu_cmd_msg msg = {0};
         struct tegra_vgpu_zbc_set_table_params *p = &msg.params.zbc_set_table;
         int err;
@@ -837,7 +814,7 @@ static int vgpu_gr_add_zbc(struct gk20a *g, struct gr_gk20a *gr,
         gk20a_dbg_fn("");
 
         msg.cmd = TEGRA_VGPU_CMD_ZBC_SET_TABLE;
-        msg.handle = platform->virt_handle;
+        msg.handle = vgpu_get_handle(g);
 
         p->type = zbc_val->type;
         p->format = zbc_val->format;
@@ -861,7 +838,6 @@ static int vgpu_gr_add_zbc(struct gk20a *g, struct gr_gk20a *gr,
 static int vgpu_gr_query_zbc(struct gk20a *g, struct gr_gk20a *gr,
                         struct zbc_query_params *query_params)
 {
-        struct gk20a_platform *platform = gk20a_get_platform(g->dev);
         struct tegra_vgpu_cmd_msg msg = {0};
         struct tegra_vgpu_zbc_query_table_params *p =
                 &msg.params.zbc_query_table;
@@ -870,7 +846,7 @@ static int vgpu_gr_query_zbc(struct gk20a *g, struct gr_gk20a *gr,
         gk20a_dbg_fn("");
 
         msg.cmd = TEGRA_VGPU_CMD_ZBC_QUERY_TABLE;
-        msg.handle = platform->virt_handle;
+        msg.handle = vgpu_get_handle(g);
 
         p->type = query_params->type;
         p->index_size = query_params->index_size;
@@ -1048,7 +1024,6 @@ int vgpu_gr_nonstall_isr(struct gk20a *g,
 static int vgpu_gr_set_sm_debug_mode(struct gk20a *g,
         struct channel_gk20a *ch, u64 sms, bool enable)
 {
-        struct gk20a_platform *platform = gk20a_get_platform(g->dev);
         struct tegra_vgpu_cmd_msg msg;
         struct tegra_vgpu_sm_debug_mode *p = &msg.params.sm_debug_mode;
         int err;
@@ -1056,7 +1031,7 @@ static int vgpu_gr_set_sm_debug_mode(struct gk20a *g,
         gk20a_dbg_fn("");
 
         msg.cmd = TEGRA_VGPU_CMD_SET_SM_DEBUG_MODE;
-        msg.handle = platform->virt_handle;
+        msg.handle = vgpu_get_handle(g);
         p->handle = ch->virt_ctx;
         p->sms = sms;
         p->enable = (u32)enable;
@@ -1069,7 +1044,6 @@ static int vgpu_gr_set_sm_debug_mode(struct gk20a *g,
 static int vgpu_gr_update_smpc_ctxsw_mode(struct gk20a *g,
                 struct channel_gk20a *ch, bool enable)
 {
-        struct gk20a_platform *platform = gk20a_get_platform(g->dev);
         struct tegra_vgpu_cmd_msg msg;
         struct tegra_vgpu_channel_set_ctxsw_mode *p = &msg.params.set_ctxsw_mode;
         int err;
@@ -1077,7 +1051,7 @@ static int vgpu_gr_update_smpc_ctxsw_mode(struct gk20a *g,
         gk20a_dbg_fn("");
 
         msg.cmd = TEGRA_VGPU_CMD_CHANNEL_SET_SMPC_CTXSW_MODE;
-        msg.handle = platform->virt_handle;
+        msg.handle = vgpu_get_handle(g);
         p->handle = ch->virt_ctx;
 
         if (enable)
@@ -1094,7 +1068,6 @@ static int vgpu_gr_update_smpc_ctxsw_mode(struct gk20a *g,
 static int vgpu_gr_update_hwpm_ctxsw_mode(struct gk20a *g,
                 struct channel_gk20a *ch, bool enable)
 {
-        struct gk20a_platform *platform = gk20a_get_platform(g->dev);
         struct tegra_vgpu_cmd_msg msg;
         struct tegra_vgpu_channel_set_ctxsw_mode *p = &msg.params.set_ctxsw_mode;
         int err;
@@ -1102,7 +1075,7 @@ static int vgpu_gr_update_hwpm_ctxsw_mode(struct gk20a *g,
         gk20a_dbg_fn("");
 
         msg.cmd = TEGRA_VGPU_CMD_CHANNEL_SET_HWPM_CTXSW_MODE;
-        msg.handle = platform->virt_handle;
+        msg.handle = vgpu_get_handle(g);
         p->handle = ch->virt_ctx;
 
         /* If we just enabled HWPM context switching, flag this
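The diff above only shows the consumer side of the change. The commit message's other point, allocating the private data at probe time, lands outside this file and so is not part of this diffstat. Under the same assumptions as the sketch before the diff, the probe-time side could look roughly like this (the function name vgpu_init_priv_data is illustrative, not taken from the commit):

#include <linux/slab.h>

static int vgpu_init_priv_data(struct device *dev)
{
        struct gk20a_platform *platform = gk20a_get_platform(dev);
        struct vgpu_priv_data *priv;

        /* Allocated once at probe; vgpu_get_handle() reads it thereafter. */
        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        platform->vgpu_priv = priv;     /* assumed field name, as above */
        return 0;
}

Allocating once at probe and reaching the handle through a helper removes the repeated gk20a_get_platform() boilerplate from every RPC call site, which is where the 58 deleted lines in the diffstat come from.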