summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/common/linux/vgpu/gr_vgpu.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/nvgpu/common/linux/vgpu/gr_vgpu.c')
-rw-r--r--drivers/gpu/nvgpu/common/linux/vgpu/gr_vgpu.c65
1 file changed, 64 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/nvgpu/common/linux/vgpu/gr_vgpu.c b/drivers/gpu/nvgpu/common/linux/vgpu/gr_vgpu.c
index fa2010cd..a0662956 100644
--- a/drivers/gpu/nvgpu/common/linux/vgpu/gr_vgpu.c
+++ b/drivers/gpu/nvgpu/common/linux/vgpu/gr_vgpu.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Virtualized GPU Graphics 2 * Virtualized GPU Graphics
3 * 3 *
4 * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved. 4 * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 7 * under the terms and conditions of the GNU General Public License,
@@ -610,6 +610,7 @@ static int vgpu_gr_init_gr_config(struct gk20a *g, struct gr_gk20a *gr)
610{ 610{
611 struct vgpu_priv_data *priv = vgpu_get_priv_data(g); 611 struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
612 u32 gpc_index; 612 u32 gpc_index;
613 u32 sm_per_tpc;
613 int err = -ENOMEM; 614 int err = -ENOMEM;
614 615
615 gk20a_dbg_fn(""); 616 gk20a_dbg_fn("");
@@ -628,8 +629,10 @@ static int vgpu_gr_init_gr_config(struct gk20a *g, struct gr_gk20a *gr)
628 if (!gr->gpc_tpc_mask) 629 if (!gr->gpc_tpc_mask)
629 goto cleanup; 630 goto cleanup;
630 631
632 sm_per_tpc = priv->constants.sm_per_tpc;
631 gr->sm_to_cluster = nvgpu_kzalloc(g, gr->gpc_count * 633 gr->sm_to_cluster = nvgpu_kzalloc(g, gr->gpc_count *
632 gr->max_tpc_per_gpc_count * 634 gr->max_tpc_per_gpc_count *
635 sm_per_tpc *
633 sizeof(struct sm_info)); 636 sizeof(struct sm_info));
634 if (!gr->sm_to_cluster) 637 if (!gr->sm_to_cluster)
635 goto cleanup; 638 goto cleanup;
@@ -1215,3 +1218,63 @@ void vgpu_gr_handle_sm_esr_event(struct gk20a *g,
1215 1218
1216 nvgpu_mutex_release(&g->dbg_sessions_lock); 1219 nvgpu_mutex_release(&g->dbg_sessions_lock);
1217} 1220}
1221
1222int vgpu_gr_init_sm_id_table(struct gk20a *g)
1223{
1224 struct tegra_vgpu_cmd_msg msg = {};
1225 struct tegra_vgpu_vsms_mapping_params *p = &msg.params.vsms_mapping;
1226 struct tegra_vgpu_vsms_mapping_entry *entry;
1227 struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
1228 struct sm_info *sm_info;
1229 int err;
1230 struct gr_gk20a *gr = &g->gr;
1231 size_t oob_size;
1232 void *handle = NULL;
1233 u32 sm_id;
1234 u32 max_sm;
1235
1236 msg.cmd = TEGRA_VGPU_CMD_GET_VSMS_MAPPING;
1237 msg.handle = vgpu_get_handle(g);
1238 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
1239 err = err ? err : msg.ret;
1240 if (err) {
1241 nvgpu_err(g, "get vsms mapping failed err %d", err);
1242 return err;
1243 }
1244
1245 handle = tegra_gr_comm_oob_get_ptr(TEGRA_GR_COMM_CTX_CLIENT,
1246 tegra_gr_comm_get_server_vmid(),
1247 TEGRA_VGPU_QUEUE_CMD,
1248 (void **)&entry, &oob_size);
1249 if (!handle)
1250 return -EINVAL;
1251
1252 max_sm = gr->gpc_count *
1253 gr->max_tpc_per_gpc_count *
1254 priv->constants.sm_per_tpc;
1255 if (p->num_sm > max_sm)
1256 return -EINVAL;
1257
1258 if ((p->num_sm * sizeof(*entry)) > oob_size)
1259 return -EINVAL;
1260
1261 gr->no_of_sm = p->num_sm;
1262 for (sm_id = 0; sm_id < p->num_sm; sm_id++, entry++) {
1263 sm_info = &gr->sm_to_cluster[sm_id];
1264 sm_info->tpc_index = entry->tpc_index;
1265 sm_info->gpc_index = entry->gpc_index;
1266 sm_info->sm_index = entry->sm_index;
1267 sm_info->global_tpc_index = entry->global_tpc_index;
1268 }
1269 tegra_gr_comm_oob_put_ptr(handle);
1270
1271 return 0;
1272}
1273
1274int vgpu_gr_init_fs_state(struct gk20a *g)
1275{
1276 if (!g->ops.gr.init_sm_id_table)
1277 return -EINVAL;
1278
1279 return g->ops.gr.init_sm_id_table(g);
1280}