path: root/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/fifo_gk20a.c')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/fifo_gk20a.c  24  +++++++++++++++++-------
1 file changed, 17 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index e91830f8..049b8da2 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -2981,7 +2981,7 @@ static u32 gk20a_fifo_get_preempt_timeout(struct gk20a *g)
 }
 
 int gk20a_fifo_is_preempt_pending(struct gk20a *g, u32 id,
-		unsigned int id_type)
+		unsigned int id_type, bool preempt_retries_left)
 {
 	struct nvgpu_timeout timeout;
 	u32 delay = GR_IDLE_CHECK_DEFAULT;
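
(For illustration only: a standalone model of the polling policy the new
preempt_retries_left flag enables. The names poll_preempt_done(),
stall_irq_pending() and hw_preempt_done() are invented for this sketch;
the real poll loop lives in gk20a_fifo_is_preempt_pending() and its
chip-specific variants.)

	#include <stdbool.h>
	#include <stdio.h>

	#define PREEMPT_RETRY    1  /* caller should clear stall irqs and retry */
	#define PREEMPT_TIMEOUT -1  /* final attempt timed out: recover engines */

	static bool hw_preempt_done(int poll);    /* stand-in for the HW status read */
	static bool stall_irq_pending(void) { return true; }

	static int poll_preempt_done(bool preempt_retries_left)
	{
		int poll;

		for (poll = 0; poll < 100; poll++) {  /* stands in for nvgpu_timeout */
			if (hw_preempt_done(poll))
				return 0;
		}
		/*
		 * Timed out. With retries left, hand control back so the
		 * caller can service stalling interrupts and preempt again;
		 * on the last attempt, report the hang so engines can be
		 * marked for reset instead.
		 */
		if (preempt_retries_left && stall_irq_pending())
			return PREEMPT_RETRY;
		return PREEMPT_TIMEOUT;
	}

	static bool hw_preempt_done(int poll) { return poll > 150; } /* never completes here */

	int main(void)
	{
		printf("retries left: %d\n", poll_preempt_done(true));   /* prints 1 */
		printf("last attempt: %d\n", poll_preempt_done(false));  /* prints -1 */
		return 0;
	}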
@@ -3037,7 +3037,8 @@ void gk20a_fifo_preempt_timeout_rc(struct gk20a *g, struct channel_gk20a *ch)
 			RC_TYPE_PREEMPT_TIMEOUT);
 }
 
-int __locked_fifo_preempt(struct gk20a *g, u32 id, bool is_tsg)
+int __locked_fifo_preempt(struct gk20a *g, u32 id, bool is_tsg,
+		bool preempt_retries_left)
 {
 	int ret;
 	unsigned int id_type;
@@ -3049,8 +3050,17 @@ int __locked_fifo_preempt(struct gk20a *g, u32 id, bool is_tsg)
 
 	id_type = is_tsg ? ID_TYPE_TSG : ID_TYPE_CHANNEL;
 
-	/* wait for preempt */
-	ret = g->ops.fifo.is_preempt_pending(g, id, id_type);
+	/*
+	 * Poll for preempt completion. If stalling interrupts are
+	 * pending while preempt is in progress, we poll for stalling
+	 * interrupts to finish, based on the return value of this
+	 * function, and retry the preempt.
+	 * If the HW is hung, on the last retry we try to identify the
+	 * hung engines, set the runlist reset_eng_bitmask and mark the
+	 * preemption complete.
+	 */
+	ret = g->ops.fifo.is_preempt_pending(g, id, id_type,
+			preempt_retries_left);
 
 	return ret;
 }
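
(For illustration only: a standalone model of the retry loop a caller is
expected to wrap around __locked_fifo_preempt(), per the comment above.
Every name below is invented for the sketch; this file's own callers pass
false and so never retry.)

	#include <stdbool.h>
	#include <stdio.h>

	#define RC_RETRY     1   /* model "stalling irqs pending, preempt again" */
	#define MAX_ATTEMPTS 3

	static int locked_fifo_preempt_model(bool preempt_retries_left)
	{
		static int attempt;

		/* pretend the first attempts find stalling interrupts pending */
		if (preempt_retries_left && ++attempt < MAX_ATTEMPTS)
			return RC_RETRY;
		return 0;   /* last attempt: completion is marked unconditionally */
	}

	int main(void)
	{
		int left, ret = 0;

		for (left = MAX_ATTEMPTS - 1; left >= 0; left--) {
			/* the flag is true on every attempt except the last */
			ret = locked_fifo_preempt_model(left > 0);
			if (ret != RC_RETRY)
				break;
			/* a real caller would poll stalling irqs to finish here */
		}
		printf("final ret = %d\n", ret);
		return 0;
	}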
@@ -3072,7 +3082,7 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, struct channel_gk20a *ch)
 
 	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 
-	ret = __locked_fifo_preempt(g, ch->chid, false);
+	ret = __locked_fifo_preempt(g, ch->chid, false, false);
 
 	if (!mutex_ret) {
 		nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
@@ -3112,7 +3122,7 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg)
 
 	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 
-	ret = __locked_fifo_preempt(g, tsg->tsgid, true);
+	ret = __locked_fifo_preempt(g, tsg->tsgid, true, false);
 
 	if (!mutex_ret) {
 		nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
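
(Aside on the unchanged context above: both entry points take the PMU
FIFO mutex around the preempt and release it only when the acquire
returned 0, and both pass preempt_retries_left = false, keeping the
single-shot behavior. A minimal standalone model of that bracket, with
every name invented for the sketch:)

	static int pmu_mutex_acquire(int *token) { *token = 42; return 0; }
	static void pmu_mutex_release(int *token) { (void)token; }
	static int locked_preempt(void) { return 0; }

	int main(void)
	{
		int token = 0;
		/* non-zero means the PMU mutex path was unavailable, so
		 * nothing must be released afterwards */
		int mutex_ret = pmu_mutex_acquire(&token);

		int ret = locked_preempt();  /* preempt_retries_left stays false */

		if (mutex_ret == 0)
			pmu_mutex_release(&token);
		return ret;
	}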
@@ -3785,7 +3795,7 @@ static int __locked_fifo_reschedule_preempt_next(struct channel_gk20a *ch,
 		gk20a_readl(g, fifo_preempt_r()));
 #endif
 	if (wait_preempt) {
-		g->ops.fifo.is_preempt_pending(g, preempt_id, preempt_type);
+		g->ops.fifo.is_preempt_pending(g, preempt_id, preempt_type, false);
 	}
 #ifdef TRACEPOINTS_ENABLED
 	trace_gk20a_reschedule_preempted_next(ch->chid);