author    Anup Mahindre <amahindre@nvidia.com>    2018-09-05 00:32:35 -0400
committer mobile promotions <svcmobile_promotions@nvidia.com>    2018-09-19 18:53:56 -0400
commit    7e591dced99f328f4960702dbb6235fe7dc7f6b5 (patch)
tree      083e4c74b511c1fa6f785221c02e8cf8efe6ea52 /drivers/gpu/nvgpu/gm20b/gr_gm20b.c
parent    a77bce7193dbd877d00a8bd69e2964db42419a8d (diff)
gpu: nvgpu: Remove NVGPU_DBG_GPU_IOCTL_WRITE_SINGLE_SM_ERROR_STATE API
Remove the API as it has no use cases.

Bug 200445906

Change-Id: Ia2803bd05d78853963011a67091b34ba5bdb3732
Signed-off-by: Anup Mahindre <amahindre@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1817629
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gm20b/gr_gm20b.c')
-rw-r--r--  drivers/gpu/nvgpu/gm20b/gr_gm20b.c  80
1 file changed, 0 insertions, 80 deletions
diff --git a/drivers/gpu/nvgpu/gm20b/gr_gm20b.c b/drivers/gpu/nvgpu/gm20b/gr_gm20b.c
index 200f58a3..368c9321 100644
--- a/drivers/gpu/nvgpu/gm20b/gr_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/gr_gm20b.c
@@ -1306,22 +1306,6 @@ static void gm20b_gr_read_sm_error_state(struct gk20a *g,
 
 }
 
-static void gm20b_gr_write_sm_error_state(struct gk20a *g,
-		u32 offset,
-		struct nvgpu_tsg_sm_error_state *sm_error_states)
-{
-	gk20a_writel(g, gr_gpc0_tpc0_sm_hww_global_esr_r() + offset,
-			sm_error_states->hww_global_esr);
-	gk20a_writel(g, gr_gpc0_tpc0_sm_hww_warp_esr_r() + offset,
-			sm_error_states->hww_warp_esr);
-	gk20a_writel(g, gr_gpc0_tpc0_sm_hww_warp_esr_pc_r() + offset,
-			u64_lo32(sm_error_states->hww_warp_esr_pc));
-	gk20a_writel(g, gr_gpcs_tpcs_sm_hww_global_esr_report_mask_r() + offset,
-			sm_error_states->hww_global_esr_report_mask);
-	gk20a_writel(g, gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_r() + offset,
-			sm_error_states->hww_warp_esr_report_mask);
-}
-
 int gm20b_gr_record_sm_error_state(struct gk20a *g, u32 gpc, u32 tpc, u32 sm,
 			struct channel_gk20a *fault_ch)
 {
@@ -1356,70 +1340,6 @@ record_fail:
 	return sm_id;
 }
 
-int gm20b_gr_update_sm_error_state(struct gk20a *g,
-			struct channel_gk20a *ch, u32 sm_id,
-			struct nvgpu_tsg_sm_error_state *sm_error_state)
-{
-	u32 gpc, tpc, offset;
-	struct tsg_gk20a *tsg;
-	struct nvgpu_gr_ctx *ch_ctx;
-	struct nvgpu_tsg_sm_error_state *tsg_sm_error_states;
-	u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
-	u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g,
-					GPU_LIT_TPC_IN_GPC_STRIDE);
-	int err = 0;
-
-	tsg = tsg_gk20a_from_ch(ch);
-	if (!tsg) {
-		return -EINVAL;
-	}
-
-	ch_ctx = &tsg->gr_ctx;
-
-	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
-
-	tsg_sm_error_states = tsg->sm_error_states + sm_id;
-	gk20a_tsg_update_sm_error_state_locked(tsg, sm_id, sm_error_state);
-
-	err = gr_gk20a_disable_ctxsw(g);
-	if (err) {
-		nvgpu_err(g, "unable to stop gr ctxsw");
-		goto fail;
-	}
-
-	gpc = g->gr.sm_to_cluster[sm_id].gpc_index;
-	tpc = g->gr.sm_to_cluster[sm_id].tpc_index;
-
-	offset = gpc_stride * gpc + tpc_in_gpc_stride * tpc;
-
-	if (gk20a_is_channel_ctx_resident(ch)) {
-		gm20b_gr_write_sm_error_state(g, offset, tsg_sm_error_states);
-	} else {
-		err = gr_gk20a_ctx_patch_write_begin(g, ch_ctx, false);
-		if (err) {
-			goto enable_ctxsw;
-		}
-
-		gr_gk20a_ctx_patch_write(g, ch_ctx,
-				gr_gpcs_tpcs_sm_hww_global_esr_report_mask_r() + offset,
-				tsg_sm_error_states->hww_global_esr_report_mask,
-				true);
-		gr_gk20a_ctx_patch_write(g, ch_ctx,
-				gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_r() + offset,
-				tsg_sm_error_states->hww_warp_esr_report_mask,
-				true);
-
-		gr_gk20a_ctx_patch_write_end(g, ch_ctx, false);
-	}
-
-enable_ctxsw:
-	err = gr_gk20a_enable_ctxsw(g);
-
-fail:
-	nvgpu_mutex_release(&g->dbg_sessions_lock);
-	return err;
-}
-
 int gm20b_gr_clear_sm_error_state(struct gk20a *g,
 		struct channel_gk20a *ch, u32 sm_id)
 {