Diffstat (limited to 'drivers/gpu/nvgpu/gm20b')
 drivers/gpu/nvgpu/gm20b/gr_gm20b.c  | 80 --------------------------------
 drivers/gpu/nvgpu/gm20b/gr_gm20b.h  |  3 ---
 drivers/gpu/nvgpu/gm20b/hal_gm20b.c |  1 -
 3 files changed, 0 insertions(+), 84 deletions(-)
diff --git a/drivers/gpu/nvgpu/gm20b/gr_gm20b.c b/drivers/gpu/nvgpu/gm20b/gr_gm20b.c
index 200f58a3..368c9321 100644
--- a/drivers/gpu/nvgpu/gm20b/gr_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/gr_gm20b.c
@@ -1306,22 +1306,6 @@ static void gm20b_gr_read_sm_error_state(struct gk20a *g,
 
 }
 
-static void gm20b_gr_write_sm_error_state(struct gk20a *g,
-		u32 offset,
-		struct nvgpu_tsg_sm_error_state *sm_error_states)
-{
-	gk20a_writel(g, gr_gpc0_tpc0_sm_hww_global_esr_r() + offset,
-			sm_error_states->hww_global_esr);
-	gk20a_writel(g, gr_gpc0_tpc0_sm_hww_warp_esr_r() + offset,
-			sm_error_states->hww_warp_esr);
-	gk20a_writel(g, gr_gpc0_tpc0_sm_hww_warp_esr_pc_r() + offset,
-			u64_lo32(sm_error_states->hww_warp_esr_pc));
-	gk20a_writel(g, gr_gpcs_tpcs_sm_hww_global_esr_report_mask_r() + offset,
-			sm_error_states->hww_global_esr_report_mask);
-	gk20a_writel(g, gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_r() + offset,
-			sm_error_states->hww_warp_esr_report_mask);
-}
-
 int gm20b_gr_record_sm_error_state(struct gk20a *g, u32 gpc, u32 tpc, u32 sm,
 			struct channel_gk20a *fault_ch)
 {
@@ -1356,70 +1340,6 @@ record_fail:
 	return sm_id;
 }
 
-int gm20b_gr_update_sm_error_state(struct gk20a *g,
-		struct channel_gk20a *ch, u32 sm_id,
-		struct nvgpu_tsg_sm_error_state *sm_error_state)
-{
-	u32 gpc, tpc, offset;
-	struct tsg_gk20a *tsg;
-	struct nvgpu_gr_ctx *ch_ctx;
-	struct nvgpu_tsg_sm_error_state *tsg_sm_error_states;
-	u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
-	u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g,
-			GPU_LIT_TPC_IN_GPC_STRIDE);
-	int err = 0;
-
-	tsg = tsg_gk20a_from_ch(ch);
-	if (!tsg) {
-		return -EINVAL;
-	}
-
-	ch_ctx = &tsg->gr_ctx;
-
-	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
-
-	tsg_sm_error_states = tsg->sm_error_states + sm_id;
-	gk20a_tsg_update_sm_error_state_locked(tsg, sm_id, sm_error_state);
-
-	err = gr_gk20a_disable_ctxsw(g);
-	if (err) {
-		nvgpu_err(g, "unable to stop gr ctxsw");
-		goto fail;
-	}
-
-	gpc = g->gr.sm_to_cluster[sm_id].gpc_index;
-	tpc = g->gr.sm_to_cluster[sm_id].tpc_index;
-
-	offset = gpc_stride * gpc + tpc_in_gpc_stride * tpc;
-
-	if (gk20a_is_channel_ctx_resident(ch)) {
-		gm20b_gr_write_sm_error_state(g, offset, tsg_sm_error_states);
-	} else {
-		err = gr_gk20a_ctx_patch_write_begin(g, ch_ctx, false);
-		if (err) {
-			goto enable_ctxsw;
-		}
-
-		gr_gk20a_ctx_patch_write(g, ch_ctx,
-				gr_gpcs_tpcs_sm_hww_global_esr_report_mask_r() + offset,
-				tsg_sm_error_states->hww_global_esr_report_mask,
-				true);
-		gr_gk20a_ctx_patch_write(g, ch_ctx,
-				gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_r() + offset,
-				tsg_sm_error_states->hww_warp_esr_report_mask,
-				true);
-
-		gr_gk20a_ctx_patch_write_end(g, ch_ctx, false);
-	}
-
-enable_ctxsw:
-	err = gr_gk20a_enable_ctxsw(g);
-
-fail:
-	nvgpu_mutex_release(&g->dbg_sessions_lock);
-	return err;
-}
-
 int gm20b_gr_clear_sm_error_state(struct gk20a *g,
 		struct channel_gk20a *ch, u32 sm_id)
 {
diff --git a/drivers/gpu/nvgpu/gm20b/gr_gm20b.h b/drivers/gpu/nvgpu/gm20b/gr_gm20b.h
index 0a486c2e..7402478d 100644
--- a/drivers/gpu/nvgpu/gm20b/gr_gm20b.h
+++ b/drivers/gpu/nvgpu/gm20b/gr_gm20b.h
@@ -117,9 +117,6 @@ void gr_gm20b_get_access_map(struct gk20a *g,
 		u32 **whitelist, int *num_entries);
 int gm20b_gr_record_sm_error_state(struct gk20a *g, u32 gpc,
 			u32 tpc, u32 sm, struct channel_gk20a *fault_ch);
-int gm20b_gr_update_sm_error_state(struct gk20a *g,
-		struct channel_gk20a *ch, u32 sm_id,
-		struct nvgpu_tsg_sm_error_state *sm_error_state);
 int gm20b_gr_clear_sm_error_state(struct gk20a *g,
 		struct channel_gk20a *ch, u32 sm_id);
 int gr_gm20b_get_preemption_mode_flags(struct gk20a *g,
diff --git a/drivers/gpu/nvgpu/gm20b/hal_gm20b.c b/drivers/gpu/nvgpu/gm20b/hal_gm20b.c
index 772a4a85..114d259a 100644
--- a/drivers/gpu/nvgpu/gm20b/hal_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/hal_gm20b.c
@@ -275,7 +275,6 @@ static const struct gpu_ops gm20b_ops = {
 		.update_smpc_ctxsw_mode = gr_gk20a_update_smpc_ctxsw_mode,
 		.update_hwpm_ctxsw_mode = gr_gk20a_update_hwpm_ctxsw_mode,
 		.record_sm_error_state = gm20b_gr_record_sm_error_state,
-		.update_sm_error_state = gm20b_gr_update_sm_error_state,
 		.clear_sm_error_state = gm20b_gr_clear_sm_error_state,
 		.suspend_contexts = gr_gk20a_suspend_contexts,
 		.resume_contexts = gr_gk20a_resume_contexts,
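
After this patch, the update_sm_error_state slot in the gm20b gpu_ops table is simply left unset for this chip. A minimal sketch of how a common-code caller would be expected to cope, assuming the usual nvgpu pattern of dispatching through g->ops and NULL-checking optional HAL ops; the wrapper name and the -ENOSYS return below are illustrative, not taken from the patch:

/*
 * Illustrative sketch, not part of the patch. With the gm20b
 * implementation removed, g->ops.gr.update_sm_error_state is left
 * NULL on this chip, so callers must check the op before invoking
 * it. The wrapper name and error code here are hypothetical.
 */
static int try_update_sm_error_state(struct gk20a *g,
		struct channel_gk20a *ch, u32 sm_id,
		struct nvgpu_tsg_sm_error_state *sm_error_state)
{
	if (g->ops.gr.update_sm_error_state == NULL) {
		/* Op not wired for this chip. */
		return -ENOSYS;
	}

	return g->ops.gr.update_sm_error_state(g, ch, sm_id,
			sm_error_state);
}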