summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
diff options
context:
space:
mode:
authorSivaram Nair <sivaramn@nvidia.com>2016-11-03 20:26:01 -0400
committerRakesh Babu Bodla <rbodla@nvidia.com>2016-11-04 11:34:18 -0400
commit5f1c2bc27fb9dd66ed046b0590afc365be5011bf (patch)
tree5b652aed11d537462f1850d528fdc6e7058a17e5 /drivers/gpu/nvgpu/vgpu/gr_vgpu.c
parent57821e215756b3df7acc9c0eb5017e39f141d381 (diff)
Revert "gpu: nvgpu: vgpu: alloc hwpm ctxt buf on client"
This reverts commit 57821e215756b3df7acc9c0eb5017e39f141d381.

Change-Id: Ic4801115064ccbcd1435298a61871921d056b8ea
Signed-off-by: Sivaram Nair <sivaramn@nvidia.com>
Reviewed-on: http://git-master/r/1247825
Reviewed-by: Rakesh Babu Bodla <rbodla@nvidia.com>
Tested-by: Rakesh Babu Bodla <rbodla@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/vgpu/gr_vgpu.c')
-rw-r--r--drivers/gpu/nvgpu/vgpu/gr_vgpu.c39
1 file changed, 14 insertions(+), 25 deletions(-)
diff --git a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
index 654b3ca3..89223091 100644
--- a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
@@ -91,10 +91,8 @@ int vgpu_gr_init_ctx_state(struct gk20a *g)
 
 	g->gr.ctx_vars.golden_image_size = priv->constants.golden_ctx_size;
 	g->gr.ctx_vars.zcull_ctxsw_image_size = priv->constants.zcull_ctx_size;
-	g->gr.ctx_vars.pm_ctxsw_image_size = priv->constants.hwpm_ctx_size;
 	if (!g->gr.ctx_vars.golden_image_size ||
-	    !g->gr.ctx_vars.zcull_ctxsw_image_size ||
-	    !g->gr.ctx_vars.pm_ctxsw_image_size)
+	    !g->gr.ctx_vars.zcull_ctxsw_image_size)
 		return -ENXIO;
 
 	gr->ctx_vars.buffer_size = g->gr.ctx_vars.golden_image_size;
@@ -392,13 +390,12 @@ static void vgpu_gr_free_channel_pm_ctx(struct channel_gk20a *c)
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_channel_free_hwpm_ctx *p = &msg.params.free_hwpm_ctx;
 	struct channel_ctx_gk20a *ch_ctx = &c->ch_ctx;
-	struct pm_ctx_desc *pm_ctx = &ch_ctx->pm_ctx;
 	int err;
 
 	gk20a_dbg_fn("");
 
 	/* check if hwpm was ever initialized. If not, nothing to do */
-	if (pm_ctx->mem.gpu_va == 0)
+	if (ch_ctx->pm_ctx.ctx_was_enabled == false)
 		return;
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_FREE_HWPM_CTX;
@@ -407,8 +404,7 @@ static void vgpu_gr_free_channel_pm_ctx(struct channel_gk20a *c)
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	WARN_ON(err || msg.ret);
 
-	gk20a_vm_free_va(c->vm, pm_ctx->mem.gpu_va, pm_ctx->mem.size, 0);
-	pm_ctx->mem.gpu_va = 0;
+	ch_ctx->pm_ctx.ctx_was_enabled = false;
 }
 
 static void vgpu_gr_free_channel_ctx(struct channel_gk20a *c)
@@ -1044,35 +1040,28 @@ static int vgpu_gr_update_smpc_ctxsw_mode(struct gk20a *g,
 static int vgpu_gr_update_hwpm_ctxsw_mode(struct gk20a *g,
 		struct channel_gk20a *ch, bool enable)
 {
-	struct channel_ctx_gk20a *ch_ctx = &ch->ch_ctx;
-	struct pm_ctx_desc *pm_ctx = &ch_ctx->pm_ctx;
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_channel_set_ctxsw_mode *p = &msg.params.set_ctxsw_mode;
 	int err;
 
 	gk20a_dbg_fn("");
 
-	if (enable) {
-		p->mode = TEGRA_VGPU_CTXSW_MODE_CTXSW;
+	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_SET_HWPM_CTXSW_MODE;
+	msg.handle = vgpu_get_handle(g);
+	p->handle = ch->virt_ctx;
 
-	/* Allocate buffer if necessary */
-	if (pm_ctx->mem.gpu_va == 0) {
-		pm_ctx->mem.gpu_va = gk20a_vm_alloc_va(ch->vm,
-				g->gr.ctx_vars.pm_ctxsw_image_size,
-				gmmu_page_size_kernel);
+	/* If we just enabled HWPM context switching, flag this
+	 * so we know we need to free the buffer when channel contexts
+	 * are cleaned up.
+	 */
+	if (enable) {
+		struct channel_ctx_gk20a *ch_ctx = &ch->ch_ctx;
+		ch_ctx->pm_ctx.ctx_was_enabled = true;
 
-		if (!pm_ctx->mem.gpu_va)
-			return -ENOMEM;
-		pm_ctx->mem.size = g->gr.ctx_vars.pm_ctxsw_image_size;
-	}
+		p->mode = TEGRA_VGPU_CTXSW_MODE_CTXSW;
 	} else
 		p->mode = TEGRA_VGPU_CTXSW_MODE_NO_CTXSW;
 
-	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_SET_HWPM_CTXSW_MODE;
-	msg.handle = vgpu_get_handle(g);
-	p->handle = ch->virt_ctx;
-	p->gpu_va = pm_ctx->mem.gpu_va;
-
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	WARN_ON(err || msg.ret);
 