author	Chris Wilson <chris@chris-wilson.co.uk>	2012-07-20 07:41:01 -0400
committer	Daniel Vetter <daniel.vetter@ffwll.ch>	2012-07-25 12:23:52 -0400
commit	0201f1ecf4b81f08799b1fb9c8cdf1125b9b78a6 (patch)
tree	51b722aace265466c15f884500542b1e54aa25e5 /drivers/gpu/drm/i915/i915_drv.h
parent	e5f1d962a8e4c5fd6b3a8155c0f7a40b0bff4a96 (diff)
drm/i915: Replace the pending_gpu_write flag with an explicit seqno
As we always flush the GPU cache prior to emitting the breadcrumb, we no
longer have to worry about the deferred flush causing the
pending_gpu_write to be delayed. So we can instead utilize the known
last_write_seqno to hopefully minimise the wait times.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
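For illustration, a minimal sketch of how the split breadcrumbs can shorten
waits. This helper is hypothetical (the commit's companion i915_gem.c changes
are not part of this diff); it assumes i915_wait_seqno() blocks until the
ring's breadcrumb passes the given seqno, as in kernels of this era. The point
is that a read-only wait (e.g. preparing a CPU read) only needs the last GPU
*write* to retire, while fully idling the object must also wait for the last
GPU read:

/*
 * Hypothetical sketch, not the patch's actual code: pick the seqno to
 * wait on based on the kind of access the caller needs to be complete.
 */
static int
wait_rendering_sketch(struct drm_i915_gem_object *obj, bool readonly)
{
	/* Writes also count as reads, so last_read_seqno is never older
	 * than last_write_seqno; waiting on last_write_seqno alone is
	 * therefore never a longer wait.
	 */
	u32 seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;

	if (seqno == 0)	/* no outstanding GPU access of that kind */
		return 0;

	return i915_wait_seqno(obj->ring, seqno);
}

This replaces the old pattern of testing the single pending_gpu_write flag and
then waiting on last_rendering_seqno regardless of the access type.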
Diffstat (limited to 'drivers/gpu/drm/i915/i915_drv.h')
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.h	| 12 +++---------
1 file changed, 3 insertions(+), 9 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 1f5f5ff6f897..49a532e338e6 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -221,7 +221,7 @@ struct drm_i915_error_state {
 	struct drm_i915_error_buffer {
 		u32 size;
 		u32 name;
-		u32 seqno;
+		u32 rseqno, wseqno;
 		u32 gtt_offset;
 		u32 read_domains;
 		u32 write_domain;
@@ -895,12 +895,6 @@ struct drm_i915_gem_object {
 	unsigned int dirty:1;
 
 	/**
-	 * This is set if the object has been written to since the last
-	 * GPU flush.
-	 */
-	unsigned int pending_gpu_write:1;
-
-	/**
 	 * Fence register bits (if any) for this object. Will be set
 	 * as needed when mapped into the GTT.
 	 * Protected by dev->struct_mutex.
@@ -992,7 +986,8 @@ struct drm_i915_gem_object {
 	struct intel_ring_buffer *ring;
 
 	/** Breadcrumb of last rendering to the buffer. */
-	uint32_t last_rendering_seqno;
+	uint32_t last_read_seqno;
+	uint32_t last_write_seqno;
 	/** Breadcrumb of last fenced GPU access to the buffer. */
 	uint32_t last_fenced_seqno;
 
@@ -1291,7 +1286,6 @@ void i915_gem_lastclose(struct drm_device *dev);
 int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
 				  gfp_t gfpmask);
 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
-int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj);
 int i915_gem_object_sync(struct drm_i915_gem_object *obj,
 			 struct intel_ring_buffer *to);
 void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,