author    Chris Wilson <chris@chris-wilson.co.uk>  2016-08-02 17:50:24 -0400
committer Chris Wilson <chris@chris-wilson.co.uk>  2016-08-02 17:58:19 -0400
commit    c7fe7d25ed6036ff16b1c112463baff21c3b205d (patch)
tree      7a57dbe985bbdf4834511d7fc78aa7abbaf7b890 /drivers/gpu/drm/i915/intel_ringbuffer.h
parent    aad29fbbb86dbac69e25433b14c8a718fb53115e (diff)
drm/i915: Remove obsolete engine->gpu_caches_dirty
Space for flushing the GPU cache prior to completing the request is
preallocated and so cannot fail - the GPU caches will always be flushed
along with the completed request. This means we no longer have to track
whether the GPU cache is dirty between batches like we had to with the
outstanding_lazy_seqno.

With the removal of the duplication in the per-backend entry points for
emitting the obsolete lazy flush, we can then further unify the
engine->emit_flush.

v2: Expand a bit on the legacy of gpu_caches_dirty

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: http://patchwork.freedesktop.org/patch/msgid/1469432687-22756-18-git-send-email-chris@chris-wilson.co.uk
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1470174640-18242-7-git-send-email-chris@chris-wilson.co.uk
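[Editorial note] For context, a minimal compilable sketch of the model this
patch moves to. All names here (sketch_*, SKETCH_EMIT_*) are hypothetical
stand-ins, not the actual i915 code: because ring space for the flush is
reserved when the request is allocated, request completion can emit the
cache flush unconditionally through one engine hook, and no gpu_caches_dirty
flag is needed between batches.

#include <stdio.h>

/* Hypothetical stand-ins for the real i915 structures. */
struct sketch_request { int id; };

#define SKETCH_EMIT_FLUSH	(1u << 0)
#define SKETCH_EMIT_INVALIDATE	(1u << 1)

struct sketch_engine {
	/* One unified hook instead of per-backend flush entry points. */
	int (*emit_flush)(struct sketch_request *req, unsigned int mode);
};

static int sketch_emit_flush(struct sketch_request *req, unsigned int mode)
{
	printf("req %d: emitting%s%s\n", req->id,
	       (mode & SKETCH_EMIT_FLUSH) ? " flush" : "",
	       (mode & SKETCH_EMIT_INVALIDATE) ? " invalidate" : "");
	return 0;	/* space was preallocated, so this cannot fail */
}

static void sketch_complete_request(struct sketch_engine *engine,
				    struct sketch_request *req)
{
	/* No gpu_caches_dirty check: always flush with the request. */
	engine->emit_flush(req, SKETCH_EMIT_FLUSH);
}

int main(void)
{
	struct sketch_engine engine = { .emit_flush = sketch_emit_flush };
	struct sketch_request req = { .id = 1 };

	sketch_complete_request(&engine, &req);
	return 0;
}

The mode bitmask in the sketch mirrors the invalidate_domains/flush_domains
pair that the removed ->flush hook took as separate arguments (see the diff
below).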
Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.h')
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 7 -------
1 file changed, 0 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index ba54ffcdd55a..00723401f98c 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -206,9 +206,6 @@ struct intel_engine_cs {
 
 	void		(*write_tail)(struct intel_engine_cs *engine,
 				      u32 value);
-	int __must_check (*flush)(struct drm_i915_gem_request *req,
-				  u32 invalidate_domains,
-				  u32 flush_domains);
 	int		(*add_request)(struct drm_i915_gem_request *req);
 	/* Some chipsets are not quite as coherent as advertised and need
 	 * an expensive kick to force a true read of the up-to-date seqno.
@@ -325,8 +322,6 @@ struct intel_engine_cs {
 	 */
 	u32 last_submitted_seqno;
 
-	bool gpu_caches_dirty;
-
 	struct i915_gem_context *last_context;
 
 	struct intel_engine_hangcheck hangcheck;
@@ -474,8 +469,6 @@ void intel_ring_update_space(struct intel_ring *ring);
 
 int __must_check intel_engine_idle(struct intel_engine_cs *engine);
 void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno);
-int intel_engine_flush_all_caches(struct drm_i915_gem_request *req);
-int intel_engine_invalidate_all_caches(struct drm_i915_gem_request *req);
 
 int intel_init_pipe_control(struct intel_engine_cs *engine, int size);
 void intel_fini_pipe_control(struct intel_engine_cs *engine);