summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
diff options
context:
space:
mode:
authorPeter Daifuku <pdaifuku@nvidia.com>2016-03-23 12:43:43 -0400
committerTerje Bergstrom <tbergstrom@nvidia.com>2016-04-08 15:34:50 -0400
commit6eeabfbdd08e48f924885952c80ff41aa2b534b7 (patch)
tree5cdba48865faa0b76e20d0994fa9de9e4c12deed /drivers/gpu/nvgpu/vgpu/gr_vgpu.c
parente8bac374c0ed24f05bf389e1e8b5aca47f61bd3a (diff)
gpu: nvgpu: vgpu: virtualized SMPC/HWPM ctx switch
Add support for SMPC and HWPM context switching when virtualized.

Bug 1648200
JIRASW EVLR-219
JIRASW EVLR-253

Change-Id: I80a1613eaad87d8510f00d9aef001400d642ecdf
Signed-off-by: Peter Daifuku <pdaifuku@nvidia.com>
Reviewed-on: http://git-master/r/1122034
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Tested-by: Terje Bergstrom <tbergstrom@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/vgpu/gr_vgpu.c')
-rw-r--r--drivers/gpu/nvgpu/vgpu/gr_vgpu.c83
1 files changed, 83 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
index 32e451ed..16d51ad3 100644
--- a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
@@ -402,12 +402,36 @@ static void vgpu_gr_free_channel_patch_ctx(struct channel_gk20a *c)
402 } 402 }
403} 403}
404 404
405static void vgpu_gr_free_channel_pm_ctx(struct channel_gk20a *c)
406{
407 struct gk20a_platform *platform = gk20a_get_platform(c->g->dev);
408 struct tegra_vgpu_cmd_msg msg;
409 struct tegra_vgpu_channel_free_hwpm_ctx *p = &msg.params.free_hwpm_ctx;
410 struct channel_ctx_gk20a *ch_ctx = &c->ch_ctx;
411 int err;
412
413 gk20a_dbg_fn("");
414
415 /* check if hwpm was ever initialized. If not, nothing to do */
416 if (ch_ctx->pm_ctx.ctx_was_enabled == false)
417 return;
418
419 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_FREE_HWPM_CTX;
420 msg.handle = platform->virt_handle;
421 p->handle = c->virt_ctx;
422 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
423 WARN_ON(err || msg.ret);
424
425 ch_ctx->pm_ctx.ctx_was_enabled = false;
426}
427
405static void vgpu_gr_free_channel_ctx(struct channel_gk20a *c) 428static void vgpu_gr_free_channel_ctx(struct channel_gk20a *c)
406{ 429{
407 gk20a_dbg_fn(""); 430 gk20a_dbg_fn("");
408 431
409 vgpu_gr_unmap_global_ctx_buffers(c); 432 vgpu_gr_unmap_global_ctx_buffers(c);
410 vgpu_gr_free_channel_patch_ctx(c); 433 vgpu_gr_free_channel_patch_ctx(c);
434 vgpu_gr_free_channel_pm_ctx(c);
411 if (!gk20a_is_channel_marked_as_tsg(c)) 435 if (!gk20a_is_channel_marked_as_tsg(c))
412 vgpu_gr_free_channel_gr_ctx(c); 436 vgpu_gr_free_channel_gr_ctx(c);
413 437
@@ -950,6 +974,63 @@ static int vgpu_gr_set_sm_debug_mode(struct gk20a *g,
950 return err ? err : msg.ret; 974 return err ? err : msg.ret;
951} 975}
952 976
977static int vgpu_gr_update_smpc_ctxsw_mode(struct gk20a *g,
978 struct channel_gk20a *ch, bool enable)
979{
980 struct gk20a_platform *platform = gk20a_get_platform(g->dev);
981 struct tegra_vgpu_cmd_msg msg;
982 struct tegra_vgpu_channel_set_ctxsw_mode *p = &msg.params.set_ctxsw_mode;
983 int err;
984
985 gk20a_dbg_fn("");
986
987 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_SET_SMPC_CTXSW_MODE;
988 msg.handle = platform->virt_handle;
989 p->handle = ch->virt_ctx;
990
991 if (enable)
992 p->mode = TEGRA_VGPU_CTXSW_MODE_CTXSW;
993 else
994 p->mode = TEGRA_VGPU_CTXSW_MODE_NO_CTXSW;
995
996 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
997 WARN_ON(err || msg.ret);
998
999 return err ? err : msg.ret;
1000}
1001
1002static int vgpu_gr_update_hwpm_ctxsw_mode(struct gk20a *g,
1003 struct channel_gk20a *ch, bool enable)
1004{
1005 struct gk20a_platform *platform = gk20a_get_platform(g->dev);
1006 struct tegra_vgpu_cmd_msg msg;
1007 struct tegra_vgpu_channel_set_ctxsw_mode *p = &msg.params.set_ctxsw_mode;
1008 int err;
1009
1010 gk20a_dbg_fn("");
1011
1012 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_SET_HWPM_CTXSW_MODE;
1013 msg.handle = platform->virt_handle;
1014 p->handle = ch->virt_ctx;
1015
1016 /* If we just enabled HWPM context switching, flag this
1017 * so we know we need to free the buffer when channel contexts
1018 * are cleaned up.
1019 */
1020 if (enable) {
1021 struct channel_ctx_gk20a *ch_ctx = &ch->ch_ctx;
1022 ch_ctx->pm_ctx.ctx_was_enabled = true;
1023
1024 p->mode = TEGRA_VGPU_CTXSW_MODE_CTXSW;
1025 } else
1026 p->mode = TEGRA_VGPU_CTXSW_MODE_NO_CTXSW;
1027
1028 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
1029 WARN_ON(err || msg.ret);
1030
1031 return err ? err : msg.ret;
1032}
1033
953void vgpu_init_gr_ops(struct gpu_ops *gops) 1034void vgpu_init_gr_ops(struct gpu_ops *gops)
954{ 1035{
955 gops->gr.free_channel_ctx = vgpu_gr_free_channel_ctx; 1036 gops->gr.free_channel_ctx = vgpu_gr_free_channel_ctx;
@@ -969,4 +1050,6 @@ void vgpu_init_gr_ops(struct gpu_ops *gops)
969 gops->gr.zbc_query_table = vgpu_gr_query_zbc; 1050 gops->gr.zbc_query_table = vgpu_gr_query_zbc;
970 gops->gr.init_ctx_state = vgpu_gr_init_ctx_state; 1051 gops->gr.init_ctx_state = vgpu_gr_init_ctx_state;
971 gops->gr.set_sm_debug_mode = vgpu_gr_set_sm_debug_mode; 1052 gops->gr.set_sm_debug_mode = vgpu_gr_set_sm_debug_mode;
1053 gops->gr.update_smpc_ctxsw_mode = vgpu_gr_update_smpc_ctxsw_mode;
1054 gops->gr.update_hwpm_ctxsw_mode = vgpu_gr_update_hwpm_ctxsw_mode;
972} 1055}