author     Chris Wilson <chris@chris-wilson.co.uk>   2016-08-02 17:50:24 -0400
committer  Chris Wilson <chris@chris-wilson.co.uk>   2016-08-02 17:58:19 -0400
commit     c7fe7d25ed6036ff16b1c112463baff21c3b205d (patch)
tree       7a57dbe985bbdf4834511d7fc78aa7abbaf7b890 /drivers/gpu/drm
parent     aad29fbbb86dbac69e25433b14c8a718fb53115e (diff)
drm/i915: Remove obsolete engine->gpu_caches_dirty
Space for flushing the GPU cache prior to completing the request is
preallocated and so cannot fail - the GPU caches will always be flushed
along with the completed request. This means we no longer have to track
whether the GPU cache is dirty between batches like we had to with the
outstanding_lazy_seqno.

With the removal of the duplication in the per-backend entry points for
emitting the obsolete lazy flush, we can then further unify the
engine->emit_flush.

v2: Expand a bit on the legacy of gpu_caches_dirty

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: http://patchwork.freedesktop.org/patch/msgid/1469432687-22756-18-git-send-email-chris@chris-wilson.co.uk
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1470174640-18242-7-git-send-email-chris@chris-wilson.co.uk
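For readers skimming the diff below, the change is a calling-convention cleanup: callers used to set engine->gpu_caches_dirty and route through per-backend helpers (intel_engine_*_all_caches() / logical_ring_*_all_caches()); they now call engine->emit_flush() directly with explicit invalidate and flush domains. The following stand-alone C sketch only illustrates that unified call shape - the struct layout, stub names, and domain value are invented for the example and are not the driver's real definitions.

/*
 * Minimal, self-contained sketch of the unified flush calling convention.
 * Types and the I915_GEM_GPU_DOMAINS value are stand-ins, not i915 code.
 * Build with: cc -Wall -o emit_flush_demo emit_flush_demo.c
 */
#include <stdio.h>

#define I915_GEM_GPU_DOMAINS 0x3eu	/* illustrative placeholder value */

struct demo_request;

struct demo_engine {
	/* single flush entry point shared by legacy and execlists paths */
	int (*emit_flush)(struct demo_request *req,
			  unsigned int invalidate_domains,
			  unsigned int flush_domains);
};

struct demo_request {
	struct demo_engine *engine;
};

static int emit_flush_stub(struct demo_request *req,
			   unsigned int invalidate_domains,
			   unsigned int flush_domains)
{
	(void)req;
	printf("emit_flush(invalidate=%#x, flush=%#x)\n",
	       invalidate_domains, flush_domains);
	return 0;	/* ring space is preallocated, so this cannot fail */
}

int main(void)
{
	struct demo_engine engine = { .emit_flush = emit_flush_stub };
	struct demo_request req = { .engine = &engine };

	/* Before running a batch: invalidate GPU caches/TLBs, flush nothing. */
	req.engine->emit_flush(&req, I915_GEM_GPU_DOMAINS, 0);

	/* When completing a request: flush everything unconditionally,
	 * with no gpu_caches_dirty bookkeeping in between.
	 */
	req.engine->emit_flush(&req, 0, I915_GEM_GPU_DOMAINS);
	return 0;
}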
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c      2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c   9
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c         11
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_request.c      8
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c            47
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.h             2
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c     72
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h      7
8 files changed, 37 insertions(+), 121 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 3336a5fcd029..beece8feb8fe 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -568,7 +568,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 	 * itlb_before_ctx_switch.
 	 */
 	if (IS_GEN6(dev_priv)) {
-		ret = engine->flush(req, I915_GEM_GPU_DOMAINS, 0);
+		ret = engine->emit_flush(req, I915_GEM_GPU_DOMAINS, 0);
 		if (ret)
 			return ret;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index d0ef675fb169..35c4c595e5ba 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -998,10 +998,8 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
 	if (flush_domains & I915_GEM_DOMAIN_GTT)
 		wmb();
 
-	/* Unconditionally invalidate gpu caches and ensure that we do flush
-	 * any residual writes from the previous batch.
-	 */
-	return intel_engine_invalidate_all_caches(req);
+	/* Unconditionally invalidate GPU caches and TLBs. */
+	return req->engine->emit_flush(req, I915_GEM_GPU_DOMAINS, 0);
 }
 
 static bool
@@ -1163,9 +1161,6 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 static void
 i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
 {
-	/* Unconditionally force add_request to emit a full flush. */
-	params->engine->gpu_caches_dirty = true;
-
 	/* Add a breadcrumb for the completion of the batch buffer */
 	__i915_add_request(params->request, params->batch_obj, true);
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index ebfa0406a6a1..39fa9eb10514 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1666,7 +1666,8 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 	int ret;
 
 	/* NB: TLBs must be flushed and invalidated before a switch */
-	ret = engine->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+	ret = engine->emit_flush(req,
+				 I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
 	if (ret)
 		return ret;
 
@@ -1693,7 +1694,8 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
 	int ret;
 
 	/* NB: TLBs must be flushed and invalidated before a switch */
-	ret = engine->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+	ret = engine->emit_flush(req,
+				 I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
 	if (ret)
 		return ret;
 
@@ -1711,8 +1713,9 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
 
 	/* XXX: RCS is the only one to auto invalidate the TLBs? */
 	if (engine->id != RCS) {
-		ret = engine->flush(req,
-				    I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+		ret = engine->emit_flush(req,
+					 I915_GEM_GPU_DOMAINS,
+					 I915_GEM_GPU_DOMAINS);
 		if (ret)
 			return ret;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 942b5b1f1602..7e3206051ced 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -451,12 +451,10 @@ void __i915_add_request(struct drm_i915_gem_request *request,
 	 * what.
 	 */
 	if (flush_caches) {
-		if (i915.enable_execlists)
-			ret = logical_ring_flush_all_caches(request);
-		else
-			ret = intel_engine_flush_all_caches(request);
+		ret = engine->emit_flush(request, 0, I915_GEM_GPU_DOMAINS);
+
 		/* Not allowed to fail! */
-		WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret);
+		WARN(ret, "engine->emit_flush() failed: %d!\n", ret);
 	}
 
 	trace_i915_gem_request_add(request);
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 86b8f41c254d..e8d971e81491 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -642,24 +642,6 @@ static void execlists_context_queue(struct drm_i915_gem_request *request)
 	spin_unlock_bh(&engine->execlist_lock);
 }
 
-static int logical_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
-{
-	struct intel_engine_cs *engine = req->engine;
-	uint32_t flush_domains;
-	int ret;
-
-	flush_domains = 0;
-	if (engine->gpu_caches_dirty)
-		flush_domains = I915_GEM_GPU_DOMAINS;
-
-	ret = engine->emit_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
-	if (ret)
-		return ret;
-
-	engine->gpu_caches_dirty = false;
-	return 0;
-}
-
 static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
 				 struct list_head *vmas)
 {
@@ -690,7 +672,7 @@ static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
 	/* Unconditionally invalidate gpu caches and ensure that we do flush
 	 * any residual writes from the previous batch.
 	 */
-	return logical_ring_invalidate_all_caches(req);
+	return req->engine->emit_flush(req, I915_GEM_GPU_DOMAINS, 0);
 }
 
 int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
@@ -930,22 +912,6 @@ void intel_logical_ring_stop(struct intel_engine_cs *engine)
 	I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
 }
 
-int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
-{
-	struct intel_engine_cs *engine = req->engine;
-	int ret;
-
-	if (!engine->gpu_caches_dirty)
-		return 0;
-
-	ret = engine->emit_flush(req, 0, I915_GEM_GPU_DOMAINS);
-	if (ret)
-		return ret;
-
-	engine->gpu_caches_dirty = false;
-	return 0;
-}
-
 static int intel_lr_context_pin(struct i915_gem_context *ctx,
 				struct intel_engine_cs *engine)
 {
@@ -1026,15 +992,15 @@ void intel_lr_context_unpin(struct i915_gem_context *ctx,
 static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
 	int ret, i;
-	struct intel_engine_cs *engine = req->engine;
 	struct intel_ring *ring = req->ring;
 	struct i915_workarounds *w = &req->i915->workarounds;
 
 	if (w->count == 0)
 		return 0;
 
-	engine->gpu_caches_dirty = true;
-	ret = logical_ring_flush_all_caches(req);
+	ret = req->engine->emit_flush(req,
+				      I915_GEM_GPU_DOMAINS,
+				      I915_GEM_GPU_DOMAINS);
 	if (ret)
 		return ret;
 
@@ -1051,8 +1017,9 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 
 	intel_ring_advance(ring);
 
-	engine->gpu_caches_dirty = true;
-	ret = logical_ring_flush_all_caches(req);
+	ret = req->engine->emit_flush(req,
+				      I915_GEM_GPU_DOMAINS,
+				      I915_GEM_GPU_DOMAINS);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index d26fb44549e5..33e0193e5451 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -72,8 +72,6 @@ int logical_xcs_ring_init(struct intel_engine_cs *engine);
 
 int intel_engines_init(struct drm_device *dev);
 
-int logical_ring_flush_all_caches(struct drm_i915_gem_request *req);
-
 /* Logical Ring Contexts */
 
 /* One extra page is added before LRC for GuC as shared data */
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index e7a7f67ab06d..9e4b49644553 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -688,8 +688,9 @@ static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 	if (w->count == 0)
 		return 0;
 
-	req->engine->gpu_caches_dirty = true;
-	ret = intel_engine_flush_all_caches(req);
+	ret = req->engine->emit_flush(req,
+				      I915_GEM_GPU_DOMAINS,
+				      I915_GEM_GPU_DOMAINS);
 	if (ret)
 		return ret;
 
@@ -706,8 +707,9 @@ static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 
 	intel_ring_advance(ring);
 
-	req->engine->gpu_caches_dirty = true;
-	ret = intel_engine_flush_all_caches(req);
+	ret = req->engine->emit_flush(req,
+				      I915_GEM_GPU_DOMAINS,
+				      I915_GEM_GPU_DOMAINS);
 	if (ret)
 		return ret;
 
@@ -2860,21 +2862,21 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
 	if (INTEL_GEN(dev_priv) >= 8) {
 		engine->init_context = intel_rcs_ctx_init;
 		engine->add_request = gen8_render_add_request;
-		engine->flush = gen8_render_ring_flush;
+		engine->emit_flush = gen8_render_ring_flush;
 		if (i915.semaphores)
 			engine->semaphore.signal = gen8_rcs_signal;
 	} else if (INTEL_GEN(dev_priv) >= 6) {
 		engine->init_context = intel_rcs_ctx_init;
-		engine->flush = gen7_render_ring_flush;
+		engine->emit_flush = gen7_render_ring_flush;
 		if (IS_GEN6(dev_priv))
-			engine->flush = gen6_render_ring_flush;
+			engine->emit_flush = gen6_render_ring_flush;
 	} else if (IS_GEN5(dev_priv)) {
-		engine->flush = gen4_render_ring_flush;
+		engine->emit_flush = gen4_render_ring_flush;
 	} else {
 		if (INTEL_GEN(dev_priv) < 4)
-			engine->flush = gen2_render_ring_flush;
+			engine->emit_flush = gen2_render_ring_flush;
 		else
-			engine->flush = gen4_render_ring_flush;
+			engine->emit_flush = gen4_render_ring_flush;
 		engine->irq_enable_mask = I915_USER_INTERRUPT;
 	}
 
@@ -2911,12 +2913,12 @@ int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine)
 		/* gen6 bsd needs a special wa for tail updates */
 		if (IS_GEN6(dev_priv))
 			engine->write_tail = gen6_bsd_ring_write_tail;
-		engine->flush = gen6_bsd_ring_flush;
+		engine->emit_flush = gen6_bsd_ring_flush;
 		if (INTEL_GEN(dev_priv) < 8)
 			engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
 	} else {
 		engine->mmio_base = BSD_RING_BASE;
-		engine->flush = bsd_ring_flush;
+		engine->emit_flush = bsd_ring_flush;
 		if (IS_GEN5(dev_priv))
 			engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
 		else
@@ -2935,7 +2937,7 @@ int intel_init_bsd2_ring_buffer(struct intel_engine_cs *engine)
 
 	intel_ring_default_vfuncs(dev_priv, engine);
 
-	engine->flush = gen6_bsd_ring_flush;
+	engine->emit_flush = gen6_bsd_ring_flush;
 
 	return intel_init_ring_buffer(engine);
 }
@@ -2946,7 +2948,7 @@ int intel_init_blt_ring_buffer(struct intel_engine_cs *engine)
 
 	intel_ring_default_vfuncs(dev_priv, engine);
 
-	engine->flush = gen6_ring_flush;
+	engine->emit_flush = gen6_ring_flush;
 	if (INTEL_GEN(dev_priv) < 8)
 		engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;
 
@@ -2959,7 +2961,7 @@ int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
 
 	intel_ring_default_vfuncs(dev_priv, engine);
 
-	engine->flush = gen6_ring_flush;
+	engine->emit_flush = gen6_ring_flush;
 
 	if (INTEL_GEN(dev_priv) < 8) {
 		engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
@@ -2970,46 +2972,6 @@ int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
 	return intel_init_ring_buffer(engine);
 }
 
-int
-intel_engine_flush_all_caches(struct drm_i915_gem_request *req)
-{
-	struct intel_engine_cs *engine = req->engine;
-	int ret;
-
-	if (!engine->gpu_caches_dirty)
-		return 0;
-
-	ret = engine->flush(req, 0, I915_GEM_GPU_DOMAINS);
-	if (ret)
-		return ret;
-
-	trace_i915_gem_ring_flush(req, 0, I915_GEM_GPU_DOMAINS);
-
-	engine->gpu_caches_dirty = false;
-	return 0;
-}
-
-int
-intel_engine_invalidate_all_caches(struct drm_i915_gem_request *req)
-{
-	struct intel_engine_cs *engine = req->engine;
-	uint32_t flush_domains;
-	int ret;
-
-	flush_domains = 0;
-	if (engine->gpu_caches_dirty)
-		flush_domains = I915_GEM_GPU_DOMAINS;
-
-	ret = engine->flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
-	if (ret)
-		return ret;
-
-	trace_i915_gem_ring_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
-
-	engine->gpu_caches_dirty = false;
-	return 0;
-}
-
 void intel_engine_stop(struct intel_engine_cs *engine)
 {
 	int ret;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index ba54ffcdd55a..00723401f98c 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -206,9 +206,6 @@ struct intel_engine_cs {
 
 	void		(*write_tail)(struct intel_engine_cs *engine,
 				      u32 value);
-	int __must_check (*flush)(struct drm_i915_gem_request *req,
-				  u32 invalidate_domains,
-				  u32 flush_domains);
 	int		(*add_request)(struct drm_i915_gem_request *req);
 	/* Some chipsets are not quite as coherent as advertised and need
 	 * an expensive kick to force a true read of the up-to-date seqno.
@@ -325,8 +322,6 @@ struct intel_engine_cs {
 	 */
 	u32 last_submitted_seqno;
 
-	bool gpu_caches_dirty;
-
 	struct i915_gem_context *last_context;
 
 	struct intel_engine_hangcheck hangcheck;
@@ -474,8 +469,6 @@ void intel_ring_update_space(struct intel_ring *ring);
 
 int __must_check intel_engine_idle(struct intel_engine_cs *engine);
 void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno);
-int intel_engine_flush_all_caches(struct drm_i915_gem_request *req);
-int intel_engine_invalidate_all_caches(struct drm_i915_gem_request *req);
 
 int intel_init_pipe_control(struct intel_engine_cs *engine, int size);
 void intel_fini_pipe_control(struct intel_engine_cs *engine);