From 57821e215756b3df7acc9c0eb5017e39f141d381 Mon Sep 17 00:00:00 2001
From: Peter Daifuku
Date: Fri, 21 Oct 2016 15:26:15 -0700
Subject: gpu: nvgpu: vgpu: alloc hwpm ctxt buf on client

In hypervisor mode, all GPU VA allocations must be done by the client;
fix this for the allocation of the hwpm ctxt buffer

Bug 200231611

Change-Id: I0270b1298308383a969a47d0a859ed53c20594ef
Signed-off-by: Peter Daifuku
Reviewed-on: http://git-master/r/1240913
(cherry picked from commit 49314d42b13e27dc2f8c1e569a8c3e750173148d)
Reviewed-on: http://git-master/r/1245867
(cherry picked from commit d0b10e84d90d0fd61eca8be0f9e879d9cec71d3e)
Reviewed-on: http://git-master/r/1246700
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom
---
 drivers/gpu/nvgpu/vgpu/gr_vgpu.c | 39 +++++++++++++++++++++++++--------------
 1 file changed, 25 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
index 89223091..654b3ca3 100644
--- a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
@@ -91,8 +91,10 @@ int vgpu_gr_init_ctx_state(struct gk20a *g)
 
 	g->gr.ctx_vars.golden_image_size = priv->constants.golden_ctx_size;
 	g->gr.ctx_vars.zcull_ctxsw_image_size = priv->constants.zcull_ctx_size;
+	g->gr.ctx_vars.pm_ctxsw_image_size = priv->constants.hwpm_ctx_size;
 	if (!g->gr.ctx_vars.golden_image_size ||
-		!g->gr.ctx_vars.zcull_ctxsw_image_size)
+		!g->gr.ctx_vars.zcull_ctxsw_image_size ||
+		!g->gr.ctx_vars.pm_ctxsw_image_size)
 		return -ENXIO;
 
 	gr->ctx_vars.buffer_size = g->gr.ctx_vars.golden_image_size;
@@ -390,12 +392,13 @@ static void vgpu_gr_free_channel_pm_ctx(struct channel_gk20a *c)
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_channel_free_hwpm_ctx *p = &msg.params.free_hwpm_ctx;
 	struct channel_ctx_gk20a *ch_ctx = &c->ch_ctx;
+	struct pm_ctx_desc *pm_ctx = &ch_ctx->pm_ctx;
 	int err;
 
 	gk20a_dbg_fn("");
 
 	/* check if hwpm was ever initialized. If not, nothing to do */
-	if (ch_ctx->pm_ctx.ctx_was_enabled == false)
+	if (pm_ctx->mem.gpu_va == 0)
 		return;
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_FREE_HWPM_CTX;
@@ -404,7 +407,8 @@ static void vgpu_gr_free_channel_pm_ctx(struct channel_gk20a *c)
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	WARN_ON(err || msg.ret);
 
-	ch_ctx->pm_ctx.ctx_was_enabled = false;
+	gk20a_vm_free_va(c->vm, pm_ctx->mem.gpu_va, pm_ctx->mem.size, 0);
+	pm_ctx->mem.gpu_va = 0;
 }
 
 static void vgpu_gr_free_channel_ctx(struct channel_gk20a *c)
@@ -1040,28 +1044,35 @@ static int vgpu_gr_update_smpc_ctxsw_mode(struct gk20a *g,
 static int vgpu_gr_update_hwpm_ctxsw_mode(struct gk20a *g,
 	struct channel_gk20a *ch, bool enable)
 {
+	struct channel_ctx_gk20a *ch_ctx = &ch->ch_ctx;
+	struct pm_ctx_desc *pm_ctx = &ch_ctx->pm_ctx;
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_channel_set_ctxsw_mode *p = &msg.params.set_ctxsw_mode;
 	int err;
 
 	gk20a_dbg_fn("");
 
-	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_SET_HWPM_CTXSW_MODE;
-	msg.handle = vgpu_get_handle(g);
-	p->handle = ch->virt_ctx;
-
-	/* If we just enabled HWPM context switching, flag this
-	 * so we know we need to free the buffer when channel contexts
-	 * are cleaned up.
-	 */
 	if (enable) {
-		struct channel_ctx_gk20a *ch_ctx = &ch->ch_ctx;
-		ch_ctx->pm_ctx.ctx_was_enabled = true;
-
 		p->mode = TEGRA_VGPU_CTXSW_MODE_CTXSW;
+
+		/* Allocate buffer if necessary */
+		if (pm_ctx->mem.gpu_va == 0) {
+			pm_ctx->mem.gpu_va = gk20a_vm_alloc_va(ch->vm,
+					g->gr.ctx_vars.pm_ctxsw_image_size,
+					gmmu_page_size_kernel);
+
+			if (!pm_ctx->mem.gpu_va)
+				return -ENOMEM;
+			pm_ctx->mem.size = g->gr.ctx_vars.pm_ctxsw_image_size;
+		}
 	} else
 		p->mode = TEGRA_VGPU_CTXSW_MODE_NO_CTXSW;
 
+	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_SET_HWPM_CTXSW_MODE;
+	msg.handle = vgpu_get_handle(g);
+	p->handle = ch->virt_ctx;
+	p->gpu_va = pm_ctx->mem.gpu_va;
+
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	WARN_ON(err || msg.ret);
 
--
cgit v1.2.2
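
For context on the pattern this patch implements: in hypervisor (vGPU) mode
the guest kernel owns the channel's GPU virtual address space, so the guest
must pick the VA range for the hwpm context buffer itself
(gk20a_vm_alloc_va) and hand that address to the server in the RPC
(p->gpu_va); the server only supplies and maps the backing store. Note that
pm_ctx->mem.gpu_va doubles as the "was hwpm enabled" flag, which is why the
patch can drop ctx_was_enabled. The sketch below models that
enable/teardown pairing under the same invariant. It is illustrative only:
every name in it (struct pm_buf, vm_alloc_va, server_set_hwpm_mode, ...) is
a hypothetical stand-in for the nvgpu equivalents, not the real API.

/*
 * Standalone sketch of the client-side allocation pattern in the patch
 * above. All names are hypothetical stand-ins: vm_alloc_va()/vm_free_va()
 * play the roles of gk20a_vm_alloc_va()/gk20a_vm_free_va(), and the
 * server_*() stubs stand in for the vgpu_comm_sendrecv() RPCs
 * (SET_HWPM_CTXSW_MODE and FREE_HWPM_CTX).
 */
#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct pm_buf {
	uint64_t gpu_va;	/* 0 means "hwpm never enabled" */
	size_t size;
};

/* Toy VA allocator: a page-aligned bump pointer. */
static uint64_t next_va = 0x100000;

static uint64_t vm_alloc_va(size_t size)
{
	uint64_t va = next_va;

	next_va += (size + 0xfffull) & ~0xfffull;
	return va;
}

static void vm_free_va(uint64_t va, size_t size)
{
	/* A real allocator would return [va, va + size) to the pool. */
	(void)va;
	(void)size;
}

/* Pretend RPCs: the server maps/unmaps backing store at the given VA. */
static int server_set_hwpm_mode(int enable, uint64_t gpu_va)
{
	printf("server: hwpm %s, va 0x%llx\n", enable ? "ctxsw" : "no-ctxsw",
	       (unsigned long long)gpu_va);
	return 0;
}

static void server_free_hwpm_ctx(uint64_t gpu_va)
{
	printf("server: free hwpm ctx, va 0x%llx\n",
	       (unsigned long long)gpu_va);
}

/* Enable path: the client picks the VA; the server merely uses it. */
static int hwpm_set_mode(struct pm_buf *pm, size_t image_size, int enable)
{
	if (enable && pm->gpu_va == 0) {
		/* Allocate on first enable, exactly once. */
		pm->gpu_va = vm_alloc_va(image_size);
		if (!pm->gpu_va)
			return -ENOMEM;
		pm->size = image_size;
	}
	return server_set_hwpm_mode(enable, pm->gpu_va);
}

/* Teardown path: mirrors vgpu_gr_free_channel_pm_ctx(). */
static void hwpm_free(struct pm_buf *pm)
{
	if (pm->gpu_va == 0)	/* hwpm never enabled: nothing to do */
		return;
	server_free_hwpm_ctx(pm->gpu_va);
	vm_free_va(pm->gpu_va, pm->size);
	pm->gpu_va = 0;
}

int main(void)
{
	struct pm_buf pm = { 0, 0 };

	hwpm_set_mode(&pm, 64 * 1024, 1);	/* allocates on first enable */
	hwpm_set_mode(&pm, 64 * 1024, 1);	/* reuses the same VA */
	hwpm_free(&pm);				/* frees the client-side VA */
	return 0;
}

Because gpu_va == 0 is the single source of truth, the enable path is
idempotent (re-enabling reuses the existing VA) and the free path is safe
to call on channels that never turned hwpm on.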