path: root/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.c
author    Deepak Nibade <dnibade@nvidia.com>    2018-05-31 18:33:50 -0400
committer Tejal Kudav <tkudav@nvidia.com>       2018-06-14 09:44:06 -0400
commit    328a7bd3ffc9590c0c432724d45da9f25732c2a1 (patch)
tree      12edca4ddd00dc8adf39a7267a63da7bf180f151 /drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.c
parent    85b920442a617f47a7ef1f6c314c8096c2708574 (diff)
gpu: nvgpu: initialize bundle64 state

On some platforms the ucode hands us bundles that carry an address and a
64-bit value. This patch adds support for handling those 64-bit values:

Add struct av64_gk20a to store an address and its corresponding 64-bit
value.
Add struct av64_list_gk20a to store a count and a list of av64_gk20a
entries.
Add the API alloc_av64_list_gk20a() to allocate a list that supports
64-bit values.

In gr_gk20a_init_ctx_vars_fw(), if we see
NETLIST_REGIONID_SW_BUNDLE64_INIT, load the bundle64 state into the
structures above.

Add a new HAL op, gops.gr.init_sw_bundle64(), and call it from
gk20a_init_sw_bundle() if it is defined.

Also load the bundle for the simulation case in
gr_gk20a_init_ctx_vars_sim().

Jira NVGPUT-96

Change-Id: I1ab7fb37ff91c5fbd968c93d714725b01fd4f59b
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1736450
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
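As a rough sketch of the declarations the message describes (the actual
definitions live in the nvgpu headers and are not part of this diff; the
field names value_lo/value_hi are assumptions), the new types and the
allocator might look like this:

	/* One bundle entry: an address plus a 64-bit value, kept as two
	 * 32-bit halves to match how the netlist packs the data.
	 * (Sketch only; field names are assumed, not shown in the diff.) */
	struct av64_gk20a {
		u32 addr;
		u32 value_lo;
		u32 value_hi;
	};

	/* A counted list of av64 entries, mirroring av_list_gk20a. */
	struct av64_list_gk20a {
		struct av64_gk20a *l;
		u32 count;
	};

	/* Allocate backing storage for av64_list->count entries; a NULL
	 * return signals failure, which matches the loader's check below. */
	static struct av64_gk20a *alloc_av64_list_gk20a(struct gk20a *g,
			struct av64_list_gk20a *av64_list)
	{
		av64_list->l = nvgpu_kzalloc(g,
				av64_list->count * sizeof(*av64_list->l));
		return av64_list->l;
	}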
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.c')
-rw-r--r--    drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.c    21
1 file changed, 21 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.c
index 3f22a1b7..e357db19 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.c
@@ -57,6 +57,18 @@ static int gr_gk20a_alloc_load_netlist_av(struct gk20a *g, u32 *src, u32 len,
 	return 0;
 }
 
+static int gr_gk20a_alloc_load_netlist_av64(struct gk20a *g, u32 *src, u32 len,
+			struct av64_list_gk20a *av64_list)
+{
+	av64_list->count = len / sizeof(struct av64_gk20a);
+	if (!alloc_av64_list_gk20a(g, av64_list))
+		return -ENOMEM;
+
+	memcpy(av64_list->l, src, len);
+
+	return 0;
+}
+
 static int gr_gk20a_alloc_load_netlist_aiv(struct gk20a *g, u32 *src, u32 len,
 			struct aiv_list_gk20a *aiv_list)
 {
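Note how the loader derives the entry count from sizeof(struct
av64_gk20a) before allocating, then bulk-copies the raw netlist bytes.
For orientation, a consumer of the loaded list would walk av64_list->l
and reassemble each 64-bit value from its halves; a minimal sketch,
assuming the value_lo/value_hi field names from above and a
hypothetical write helper:

	u32 i;

	for (i = 0; i < av64_list->count; i++) {
		struct av64_gk20a *e = &av64_list->l[i];
		u64 value = ((u64)e->value_hi << 32) | (u64)e->value_lo;

		/* e->addr is the register offset; value is the full
		 * 64-bit payload the ucode supplied for it. */
		write_bundle64(g, e->addr, value); /* hypothetical helper */
	}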
@@ -343,6 +355,14 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr)
 			if (err)
 				goto clean_up;
 			break;
+		case NETLIST_REGIONID_SW_BUNDLE64_INIT:
+			nvgpu_log_info(g, "NETLIST_REGIONID_SW_BUNDLE64_INIT");
+			err = gr_gk20a_alloc_load_netlist_av64(g,
+				src, size,
+				&g->gr.ctx_vars.sw_bundle64_init);
+			if (err)
+				goto clean_up;
+			break;
 		case NETLIST_REGIONID_NVPERF_PMCAU:
 			nvgpu_log_info(g, "NETLIST_REGIONID_NVPERF_PMCAU");
 			err = gr_gk20a_alloc_load_netlist_aiv(g,
@@ -403,6 +423,7 @@ clean_up:
 	nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.pm_rop.l);
 	nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.pm_ucgpc.l);
 	nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.etpc.l);
+	nvgpu_kfree(g, g->gr.ctx_vars.sw_bundle64_init.l);
 	nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.pm_cau.l);
 	nvgpu_release_firmware(g, netlist_fw);
 	err = -ENOENT;
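The commit message also introduces gops.gr.init_sw_bundle64() and calls
it from gk20a_init_sw_bundle() when defined. That call site is not part
of this file's diff, but it plausibly follows the usual nvgpu pattern
for optional HAL ops; a minimal sketch (the surrounding error label is
assumed):

	/* Invoke the platform's 64-bit bundle init only if the chip's
	 * HAL provides one; older chips leave the pointer NULL. */
	if (g->ops.gr.init_sw_bundle64) {
		err = g->ops.gr.init_sw_bundle64(g);
		if (err)
			goto error; /* label name assumed */
	}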