path: root/drivers/gpu/drm/i915/i915_request.c
author    Chris Wilson <chris@chris-wilson.co.uk>    2018-06-12 06:51:35 -0400
committer Chris Wilson <chris@chris-wilson.co.uk>    2018-06-14 03:16:12 -0400
commit    697b9a8714cb4631fd0526b3c78955d5422c24ba (patch)
tree      cd96e54f9bd567b2e435d698ed0debcc72c97cc1 /drivers/gpu/drm/i915/i915_request.c
parent    e4dd27aadd205417a2e9ea9902b698a0252ec3a0 (diff)
drm/i915: Make closing request flush mandatory
For symmetry, simplicity, and to ensure that the request is always truly idle upon its completion, always emit the closing flush prior to emitting the request breadcrumb. Previously, we would only emit the flush if we had started a user batch, but this just leaves all the other paths open to speculation (do they affect the GPU caches or not?). With mm switching, a key requirement is that the GPU is flushed and invalidated beforehand, so for absolute safety, we want that closing flush to be mandatory.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180612105135.4459-1-chris@chris-wilson.co.uk
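To make the intent concrete, here is a minimal, self-contained C sketch of the before/after control flow. The types and names (toy_engine, toy_request_add, and so on) are illustrative stand-ins, not the driver's actual API; only the ordering guarantee mirrors the patch: the closing flush now always precedes the breadcrumb, instead of depending on a caller-supplied flush_caches flag.

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins for the engine vfuncs; names are illustrative only. */
struct toy_engine {
	void (*emit_flush)(const char *why);
	void (*emit_breadcrumb)(void);
};

static void toy_flush(const char *why)
{
	printf("flush (%s)\n", why);
}

static void toy_breadcrumb(void)
{
	printf("breadcrumb\n");
}

/* Before: the closing flush depended on a caller-supplied flag. */
static void toy_request_add_old(struct toy_engine *e, bool flush_caches)
{
	if (flush_caches)
		e->emit_flush("only when the caller asked for it");
	e->emit_breadcrumb();
}

/* After: the closing flush is mandatory on every path. */
static void toy_request_add(struct toy_engine *e)
{
	e->emit_flush("always, before the breadcrumb");
	e->emit_breadcrumb();
}

int main(void)
{
	struct toy_engine e = { toy_flush, toy_breadcrumb };

	toy_request_add_old(&e, false);	/* old: breadcrumb with no flush */
	toy_request_add(&e);		/* new: flush always precedes it */
	return 0;
}

In the old variant, a path that passed flush_caches=false could retire a request without any closing flush; that is exactly the speculation about cache state that the patch removes.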
Diffstat (limited to 'drivers/gpu/drm/i915/i915_request.c')
-rw-r--r--    drivers/gpu/drm/i915/i915_request.c    18
1 file changed, 2 insertions(+), 16 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 9092f5464c24..e1dbb544046f 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -1018,14 +1018,13 @@ i915_request_await_object(struct i915_request *to,
  * request is not being tracked for completion but the work itself is
  * going to happen on the hardware. This would be a Bad Thing(tm).
  */
-void __i915_request_add(struct i915_request *request, bool flush_caches)
+void i915_request_add(struct i915_request *request)
 {
 	struct intel_engine_cs *engine = request->engine;
 	struct i915_timeline *timeline = request->timeline;
 	struct intel_ring *ring = request->ring;
 	struct i915_request *prev;
 	u32 *cs;
-	int err;
 
 	GEM_TRACE("%s fence %llx:%d\n",
 		  engine->name, request->fence.context, request->fence.seqno);
@@ -1046,20 +1045,7 @@ void __i915_request_add(struct i915_request *request, bool flush_caches)
 	 * know that it is time to use that space up.
 	 */
 	request->reserved_space = 0;
-
-	/*
-	 * Emit any outstanding flushes - execbuf can fail to emit the flush
-	 * after having emitted the batchbuffer command. Hence we need to fix
-	 * things up similar to emitting the lazy request. The difference here
-	 * is that the flush _must_ happen before the next request, no matter
-	 * what.
-	 */
-	if (flush_caches) {
-		err = engine->emit_flush(request, EMIT_FLUSH);
-
-		/* Not allowed to fail! */
-		WARN(err, "engine->emit_flush() failed: %d!\n", err);
-	}
+	engine->emit_flush(request, EMIT_FLUSH);
 
 	/*
 	 * Record the position of the start of the breadcrumb so that