author    Chris Wilson <chris@chris-wilson.co.uk>    2016-08-02 17:50:24 -0400
committer Chris Wilson <chris@chris-wilson.co.uk>    2016-08-02 17:58:19 -0400
commit    c7fe7d25ed6036ff16b1c112463baff21c3b205d (patch)
tree      7a57dbe985bbdf4834511d7fc78aa7abbaf7b890 /drivers/gpu/drm/i915/intel_lrc.c
parent    aad29fbbb86dbac69e25433b14c8a718fb53115e (diff)
drm/i915: Remove obsolete engine->gpu_caches_dirty
Space for flushing the GPU cache prior to completing the request is
preallocated and so cannot fail - the GPU caches will always be flushed
along with the completed request. This means we no longer have to track
whether the GPU cache is dirty between batches like we had to with the
outstanding_lazy_seqno.

With the removal of the duplication in the per-backend entry points for
emitting the obsolete lazy flush, we can then further unify the
engine->emit_flush.

v2: Expand a bit on the legacy of gpu_caches_dirty

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: http://patchwork.freedesktop.org/patch/msgid/1469432687-22756-18-git-send-email-chris@chris-wilson.co.uk
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1470174640-18242-7-git-send-email-chris@chris-wilson.co.uk
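A condensed before/after sketch of the calling pattern, drawn from the diff
below (illustrative fragments, not verbatim excerpts from the file):

    /* Before: callers marked the engine dirty and relied on a wrapper
     * to decide which domains needed flushing.
     */
    engine->gpu_caches_dirty = true;
    ret = logical_ring_flush_all_caches(req);

    /* After: every completed request flushes the GPU caches (space for
     * that flush is preallocated and so cannot fail), so no dirty state
     * is tracked and callers name the invalidate/flush domains directly.
     */
    ret = req->engine->emit_flush(req,
                                  I915_GEM_GPU_DOMAINS,
                                  I915_GEM_GPU_DOMAINS);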
Diffstat (limited to 'drivers/gpu/drm/i915/intel_lrc.c')
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c | 47
1 file changed, 7 insertions(+), 40 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 86b8f41c254d..e8d971e81491 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -642,24 +642,6 @@ static void execlists_context_queue(struct drm_i915_gem_request *request)
 	spin_unlock_bh(&engine->execlist_lock);
 }
 
-static int logical_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
-{
-	struct intel_engine_cs *engine = req->engine;
-	uint32_t flush_domains;
-	int ret;
-
-	flush_domains = 0;
-	if (engine->gpu_caches_dirty)
-		flush_domains = I915_GEM_GPU_DOMAINS;
-
-	ret = engine->emit_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
-	if (ret)
-		return ret;
-
-	engine->gpu_caches_dirty = false;
-	return 0;
-}
-
 static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
 				 struct list_head *vmas)
 {
@@ -690,7 +672,7 @@ static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
 	/* Unconditionally invalidate gpu caches and ensure that we do flush
 	 * any residual writes from the previous batch.
 	 */
-	return logical_ring_invalidate_all_caches(req);
+	return req->engine->emit_flush(req, I915_GEM_GPU_DOMAINS, 0);
 }
 
 int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
@@ -930,22 +912,6 @@ void intel_logical_ring_stop(struct intel_engine_cs *engine)
 	I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
 }
 
-int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
-{
-	struct intel_engine_cs *engine = req->engine;
-	int ret;
-
-	if (!engine->gpu_caches_dirty)
-		return 0;
-
-	ret = engine->emit_flush(req, 0, I915_GEM_GPU_DOMAINS);
-	if (ret)
-		return ret;
-
-	engine->gpu_caches_dirty = false;
-	return 0;
-}
-
 static int intel_lr_context_pin(struct i915_gem_context *ctx,
 				struct intel_engine_cs *engine)
 {
@@ -1026,15 +992,15 @@ void intel_lr_context_unpin(struct i915_gem_context *ctx,
 static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
 	int ret, i;
-	struct intel_engine_cs *engine = req->engine;
 	struct intel_ring *ring = req->ring;
 	struct i915_workarounds *w = &req->i915->workarounds;
 
 	if (w->count == 0)
 		return 0;
 
-	engine->gpu_caches_dirty = true;
-	ret = logical_ring_flush_all_caches(req);
+	ret = req->engine->emit_flush(req,
+				      I915_GEM_GPU_DOMAINS,
+				      I915_GEM_GPU_DOMAINS);
 	if (ret)
 		return ret;
 
@@ -1051,8 +1017,9 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 
 	intel_ring_advance(ring);
 
-	engine->gpu_caches_dirty = true;
-	ret = logical_ring_flush_all_caches(req);
+	ret = req->engine->emit_flush(req,
+				      I915_GEM_GPU_DOMAINS,
+				      I915_GEM_GPU_DOMAINS);
 	if (ret)
 		return ret;
 
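From the call sites above, the unified hook takes the request plus two domain
masks. A reconstruction of its shape for reference (the actual vtable member is
declared in a header this diff does not touch, so treat this as an inferred
signature rather than a quotation):

    /* Inferred shape of the unified flush hook: the first mask names the
     * GPU domains to invalidate before the following commands, the second
     * the domains whose earlier writes must be flushed.
     */
    int (*emit_flush)(struct drm_i915_gem_request *request,
                      u32 invalidate_domains,
                      u32 flush_domains);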