diff options
Diffstat (limited to 'drivers/gpu')
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/mm_gk20a.h | 1 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/vgpu/gr_vgpu.c | 39 |
2 files changed, 25 insertions(+), 15 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h index 1a9bee5f..512d32e9 100644 --- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h +++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h | |||
@@ -110,7 +110,6 @@ struct zcull_ctx_desc { | |||
110 | struct pm_ctx_desc { | 110 | struct pm_ctx_desc { |
111 | struct mem_desc mem; | 111 | struct mem_desc mem; |
112 | u32 pm_mode; | 112 | u32 pm_mode; |
113 | bool ctx_was_enabled; /* Used in the virtual case only */ | ||
114 | }; | 113 | }; |
115 | 114 | ||
116 | struct gk20a; | 115 | struct gk20a; |
diff --git a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c index 01f5e1a5..65e3589b 100644 --- a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c +++ b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c | |||
@@ -91,8 +91,10 @@ int vgpu_gr_init_ctx_state(struct gk20a *g) | |||
91 | 91 | ||
92 | g->gr.ctx_vars.golden_image_size = priv->constants.golden_ctx_size; | 92 | g->gr.ctx_vars.golden_image_size = priv->constants.golden_ctx_size; |
93 | g->gr.ctx_vars.zcull_ctxsw_image_size = priv->constants.zcull_ctx_size; | 93 | g->gr.ctx_vars.zcull_ctxsw_image_size = priv->constants.zcull_ctx_size; |
94 | g->gr.ctx_vars.pm_ctxsw_image_size = priv->constants.hwpm_ctx_size; | ||
94 | if (!g->gr.ctx_vars.golden_image_size || | 95 | if (!g->gr.ctx_vars.golden_image_size || |
95 | !g->gr.ctx_vars.zcull_ctxsw_image_size) | 96 | !g->gr.ctx_vars.zcull_ctxsw_image_size || |
97 | !g->gr.ctx_vars.pm_ctxsw_image_size) | ||
96 | return -ENXIO; | 98 | return -ENXIO; |
97 | 99 | ||
98 | gr->ctx_vars.buffer_size = g->gr.ctx_vars.golden_image_size; | 100 | gr->ctx_vars.buffer_size = g->gr.ctx_vars.golden_image_size; |
@@ -390,12 +392,13 @@ static void vgpu_gr_free_channel_pm_ctx(struct channel_gk20a *c) | |||
390 | struct tegra_vgpu_cmd_msg msg; | 392 | struct tegra_vgpu_cmd_msg msg; |
391 | struct tegra_vgpu_channel_free_hwpm_ctx *p = &msg.params.free_hwpm_ctx; | 393 | struct tegra_vgpu_channel_free_hwpm_ctx *p = &msg.params.free_hwpm_ctx; |
392 | struct channel_ctx_gk20a *ch_ctx = &c->ch_ctx; | 394 | struct channel_ctx_gk20a *ch_ctx = &c->ch_ctx; |
395 | struct pm_ctx_desc *pm_ctx = &ch_ctx->pm_ctx; | ||
393 | int err; | 396 | int err; |
394 | 397 | ||
395 | gk20a_dbg_fn(""); | 398 | gk20a_dbg_fn(""); |
396 | 399 | ||
397 | /* check if hwpm was ever initialized. If not, nothing to do */ | 400 | /* check if hwpm was ever initialized. If not, nothing to do */ |
398 | if (ch_ctx->pm_ctx.ctx_was_enabled == false) | 401 | if (pm_ctx->mem.gpu_va == 0) |
399 | return; | 402 | return; |
400 | 403 | ||
401 | msg.cmd = TEGRA_VGPU_CMD_CHANNEL_FREE_HWPM_CTX; | 404 | msg.cmd = TEGRA_VGPU_CMD_CHANNEL_FREE_HWPM_CTX; |
@@ -404,7 +407,8 @@ static void vgpu_gr_free_channel_pm_ctx(struct channel_gk20a *c) | |||
404 | err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)); | 407 | err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)); |
405 | WARN_ON(err || msg.ret); | 408 | WARN_ON(err || msg.ret); |
406 | 409 | ||
407 | ch_ctx->pm_ctx.ctx_was_enabled = false; | 410 | gk20a_vm_free_va(c->vm, pm_ctx->mem.gpu_va, pm_ctx->mem.size, 0); |
411 | pm_ctx->mem.gpu_va = 0; | ||
408 | } | 412 | } |
409 | 413 | ||
410 | static void vgpu_gr_free_channel_ctx(struct channel_gk20a *c) | 414 | static void vgpu_gr_free_channel_ctx(struct channel_gk20a *c) |
@@ -1019,28 +1023,35 @@ static int vgpu_gr_update_smpc_ctxsw_mode(struct gk20a *g, | |||
1019 | static int vgpu_gr_update_hwpm_ctxsw_mode(struct gk20a *g, | 1023 | static int vgpu_gr_update_hwpm_ctxsw_mode(struct gk20a *g, |
1020 | struct channel_gk20a *ch, bool enable) | 1024 | struct channel_gk20a *ch, bool enable) |
1021 | { | 1025 | { |
1026 | struct channel_ctx_gk20a *ch_ctx = &ch->ch_ctx; | ||
1027 | struct pm_ctx_desc *pm_ctx = &ch_ctx->pm_ctx; | ||
1022 | struct tegra_vgpu_cmd_msg msg; | 1028 | struct tegra_vgpu_cmd_msg msg; |
1023 | struct tegra_vgpu_channel_set_ctxsw_mode *p = &msg.params.set_ctxsw_mode; | 1029 | struct tegra_vgpu_channel_set_ctxsw_mode *p = &msg.params.set_ctxsw_mode; |
1024 | int err; | 1030 | int err; |
1025 | 1031 | ||
1026 | gk20a_dbg_fn(""); | 1032 | gk20a_dbg_fn(""); |
1027 | 1033 | ||
1028 | msg.cmd = TEGRA_VGPU_CMD_CHANNEL_SET_HWPM_CTXSW_MODE; | ||
1029 | msg.handle = vgpu_get_handle(g); | ||
1030 | p->handle = ch->virt_ctx; | ||
1031 | |||
1032 | /* If we just enabled HWPM context switching, flag this | ||
1033 | * so we know we need to free the buffer when channel contexts | ||
1034 | * are cleaned up. | ||
1035 | */ | ||
1036 | if (enable) { | 1034 | if (enable) { |
1037 | struct channel_ctx_gk20a *ch_ctx = &ch->ch_ctx; | ||
1038 | ch_ctx->pm_ctx.ctx_was_enabled = true; | ||
1039 | |||
1040 | p->mode = TEGRA_VGPU_CTXSW_MODE_CTXSW; | 1035 | p->mode = TEGRA_VGPU_CTXSW_MODE_CTXSW; |
1036 | |||
1037 | /* Allocate buffer if necessary */ | ||
1038 | if (pm_ctx->mem.gpu_va == 0) { | ||
1039 | pm_ctx->mem.gpu_va = gk20a_vm_alloc_va(ch->vm, | ||
1040 | g->gr.ctx_vars.pm_ctxsw_image_size, | ||
1041 | gmmu_page_size_kernel); | ||
1042 | |||
1043 | if (!pm_ctx->mem.gpu_va) | ||
1044 | return -ENOMEM; | ||
1045 | pm_ctx->mem.size = g->gr.ctx_vars.pm_ctxsw_image_size; | ||
1046 | } | ||
1041 | } else | 1047 | } else |
1042 | p->mode = TEGRA_VGPU_CTXSW_MODE_NO_CTXSW; | 1048 | p->mode = TEGRA_VGPU_CTXSW_MODE_NO_CTXSW; |
1043 | 1049 | ||
1050 | msg.cmd = TEGRA_VGPU_CMD_CHANNEL_SET_HWPM_CTXSW_MODE; | ||
1051 | msg.handle = vgpu_get_handle(g); | ||
1052 | p->handle = ch->virt_ctx; | ||
1053 | p->gpu_va = pm_ctx->mem.gpu_va; | ||
1054 | |||
1044 | err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)); | 1055 | err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)); |
1045 | WARN_ON(err || msg.ret); | 1056 | WARN_ON(err || msg.ret); |
1046 | 1057 | ||