path: root/drivers/gpu/nvgpu/gv100/mc_gv100.c
author	Seema Khowala <seemaj@nvidia.com>	2018-06-27 01:57:02 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2018-07-19 16:54:26 -0400
commit	b1d0d8ece83ba0aa7b1e7ea9062eedc5cd9e4e33 (patch)
tree	5a88d345e23e05d3a3ca9018cedcf6b12958a20b /drivers/gpu/nvgpu/gv100/mc_gv100.c
parent	d859c5f4a03b975dc493f72a35016e83adad279a (diff)
Revert "Revert: GV11B runlist preemption patches"
This reverts commit 0b02c8589dcc507865a8fd398431c45fbda2ba9c.

The change was originally reverted because it made the ap_compute test fail on embedded-qnx-hv e3550-t194. The issue was resolved by related fixes: replacing tsg preempt with runlist preempt during teardown, setting the preempt timeout to 100 ms (previously 1000 ms for t194 and 3000 ms for legacy chips), and not issuing preempt timeout recovery if the preempt fails.

Bug 200426402

Change-Id: If9a68d028a155075444cc1bdf411057e3388d48e
Signed-off-by: Seema Khowala <seemaj@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1762563
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
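[Editor's note] The teardown policy described above can be summarized in a short, hedged sketch. This is not the actual nvgpu code: the helpers issue_runlist_preempt() and poll_preempt_done(), the constant PREEMPT_TIMEOUT_MS, and the function name are hypothetical; only the policy itself (runlist preempt during teardown, 100 ms timeout, no timeout recovery when the preempt fails) comes from the commit message.

/* Hedged sketch of the teardown policy from the commit message.
 * Hypothetical helpers assumed to exist elsewhere:
 *   issue_runlist_preempt() - triggers a preempt of the whole runlist
 *   poll_preempt_done()     - polls completion, returns nonzero on timeout
 */
#define PREEMPT_TIMEOUT_MS	100U	/* was 1000 ms (t194) / 3000 ms (legacy) */

static int example_teardown_preempt(struct gk20a *g, u32 runlist_id)
{
	int err;

	/* Preempt the whole runlist rather than the single TSG. */
	issue_runlist_preempt(g, runlist_id);

	/* Wait at most PREEMPT_TIMEOUT_MS for the preempt to complete. */
	err = poll_preempt_done(g, runlist_id, PREEMPT_TIMEOUT_MS);

	/* Per the commit message, a preempt timeout is reported but no
	 * additional timeout recovery is issued; teardown proceeds. */
	if (err != 0)
		nvgpu_err(g, "runlist %u preempt timed out", runlist_id);

	return err;
}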
Diffstat (limited to 'drivers/gpu/nvgpu/gv100/mc_gv100.c')
-rw-r--r--	drivers/gpu/nvgpu/gv100/mc_gv100.c	16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/drivers/gpu/nvgpu/gv100/mc_gv100.c b/drivers/gpu/nvgpu/gv100/mc_gv100.c
index 46af100a..7d38a3fb 100644
--- a/drivers/gpu/nvgpu/gv100/mc_gv100.c
+++ b/drivers/gpu/nvgpu/gv100/mc_gv100.c
@@ -66,15 +66,14 @@ bool gv100_mc_is_intr_nvlink_pending(struct gk20a *g, u32 mc_intr_0)
 	return (((mc_intr_0 & mc_intr_nvlink_pending_f()) != 0U) ? true : false);
 }
 
-bool gv100_mc_is_stall_and_eng_intr_pending(struct gk20a *g, u32 act_eng_id)
+bool gv100_mc_is_stall_and_eng_intr_pending(struct gk20a *g, u32 act_eng_id,
+					u32 *eng_intr_pending)
 {
 	u32 mc_intr_0 = gk20a_readl(g, mc_intr_r(0));
 	u32 stall_intr, eng_intr_mask;
 
 	eng_intr_mask = gk20a_fifo_act_eng_interrupt_mask(g, act_eng_id);
-	if ((mc_intr_0 & eng_intr_mask) != 0U) {
-		return true;
-	}
+	*eng_intr_pending = mc_intr_0 & eng_intr_mask;
 
 	stall_intr = mc_intr_pfifo_pending_f() |
 			mc_intr_hub_pending_f() |
@@ -82,9 +81,10 @@ bool gv100_mc_is_stall_and_eng_intr_pending(struct gk20a *g, u32 act_eng_id)
 			mc_intr_pbus_pending_f() |
 			mc_intr_ltc_pending_f() |
 			mc_intr_nvlink_pending_f();
-	if ((mc_intr_0 & stall_intr) != 0U) {
-		return true;
-	}
 
-	return false;
+	nvgpu_log(g, gpu_dbg_info | gpu_dbg_intr,
+		"mc_intr_0 = 0x%08x, eng_intr = 0x%08x",
+		mc_intr_0 & stall_intr, *eng_intr_pending);
+
+	return (mc_intr_0 & (eng_intr_mask | stall_intr)) != 0U;
 }
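[Editor's note] For context, a minimal caller sketch of the widened interface, not part of this commit: the boolean return still answers whether any relevant interrupt is pending, while the new eng_intr_pending out-parameter lets the caller distinguish the engine's own interrupt bits from the stalling-unit bits. The wrapper name and control flow below are illustrative assumptions; only gv100_mc_is_stall_and_eng_intr_pending() and its argument types come from the diff.

/* Illustrative use of the new out-parameter; assumes the nvgpu headers that
 * declare struct gk20a and gv100_mc_is_stall_and_eng_intr_pending().
 * Not taken from this commit. */
static bool example_check_preempt_interrupts(struct gk20a *g, u32 act_eng_id)
{
	u32 eng_intr_pending = 0U;
	bool pending;

	/* One read of mc_intr_r(0) now reports both conditions: 'pending'
	 * covers engine and stalling-unit interrupts, while eng_intr_pending
	 * holds only the engine's interrupt bits. */
	pending = gv100_mc_is_stall_and_eng_intr_pending(g, act_eng_id,
			&eng_intr_pending);

	if (pending && (eng_intr_pending != 0U)) {
		/* Engine-specific interrupt: a caller could service the
		 * engine before polling the preempt again. */
		return true;
	}

	/* false: nothing pending.  true with eng_intr_pending == 0U: a
	 * stalling interrupt (PFIFO, HUB, PBUS, LTC, NVLINK, ...) is pending. */
	return pending;
}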