about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorChris Wilson <chris@chris-wilson.co.uk>2018-08-28 11:27:02 -0400
committerChris Wilson <chris@chris-wilson.co.uk>2018-08-29 08:49:08 -0400
commit9e4fa01221b3230320135072ad31ea809ca31147 (patch)
treecf55e999e87d16a214426b67d785b6507e36dd5d
parentd8c5d29f21bf0bc690fd8c26c54197221e235bc9 (diff)
drm/i915/execlists: Flush tasklet directly from reset-finish
On finishing the reset, the intention is to restart the GPU before we relinquish the forcewake taken to handle the reset - the goal being the GPU reloads a context before it is allowed to sleep. For this purpose, we used tasklet_flush() which although it accomplished the goal of restarting the GPU, carried with it a sting in its tail: it cleared the TASKLET_STATE_SCHED bit. This meant that if another CPU queued a new request to this engine, we would clear the flag and later attempt to requeue the tasklet on the local CPU, breaking the per-cpu softirq lists. Remove the dangerous tasklet_kill() and just run the tasklet func directly as we know it is safe to do so (the tasklets are internally locked to allow mixed usage from direct submission). Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com> Cc: Mika Kuoppala <mika.kuoppala@intel.com> Cc: Michel Thierry <michel.thierry@intel.com> Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com> Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com> Link: https://patchwork.freedesktop.org/patch/msgid/20180828152702.27536-1-chris@chris-wilson.co.uk
-rw-r--r--drivers/gpu/drm/i915/i915_gem.h6
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c17
2 files changed, 6 insertions(+), 17 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index e46592956872..599c4f6eb1ea 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -82,12 +82,6 @@ static inline void __tasklet_disable_sync_once(struct tasklet_struct *t)
82 tasklet_unlock_wait(t); 82 tasklet_unlock_wait(t);
83} 83}
84 84
85static inline void __tasklet_enable_sync_once(struct tasklet_struct *t)
86{
87 if (atomic_dec_return(&t->count) == 0)
88 tasklet_kill(t);
89}
90
91static inline bool __tasklet_is_enabled(const struct tasklet_struct *t) 85static inline bool __tasklet_is_enabled(const struct tasklet_struct *t)
92{ 86{
93 return !atomic_read(&t->count); 87 return !atomic_read(&t->count);
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 36050f085071..f8ceb9c99dd6 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1962,21 +1962,16 @@ static void execlists_reset_finish(struct intel_engine_cs *engine)
1962{ 1962{
1963 struct intel_engine_execlists * const execlists = &engine->execlists; 1963 struct intel_engine_execlists * const execlists = &engine->execlists;
1964 1964
1965 /* After a GPU reset, we may have requests to replay */
1966 if (!RB_EMPTY_ROOT(&execlists->queue.rb_root))
1967 tasklet_schedule(&execlists->tasklet);
1968
1969 /* 1965 /*
1970 * Flush the tasklet while we still have the forcewake to be sure 1966 * After a GPU reset, we may have requests to replay. Do so now while
1971 * that it is not allowed to sleep before we restart and reload a 1967 * we still have the forcewake to be sure that the GPU is not allowed
1972 * context. 1968 * to sleep before we restart and reload a context.
1973 * 1969 *
1974 * As before (with execlists_reset_prepare) we rely on the caller
1975 * serialising multiple attempts to reset so that we know that we
1976 * are the only one manipulating tasklet state.
1977 */ 1970 */
1978 __tasklet_enable_sync_once(&execlists->tasklet); 1971 if (!RB_EMPTY_ROOT(&execlists->queue.rb_root))
1972 execlists->tasklet.func(execlists->tasklet.data);
1979 1973
1974 tasklet_enable(&execlists->tasklet);
1980 GEM_TRACE("%s: depth->%d\n", engine->name, 1975 GEM_TRACE("%s: depth->%d\n", engine->name,
1981 atomic_read(&execlists->tasklet.count)); 1976 atomic_read(&execlists->tasklet.count));
1982} 1977}