author    Chris Wilson <chris@chris-wilson.co.uk>    2010-10-23 05:15:06 -0400
committer Chris Wilson <chris@chris-wilson.co.uk>    2010-10-23 06:07:21 -0400
commit    b6651458d33c309767762a6c3da041573413fd88
tree      cf929107666824633ce8406816abf7f7595f89c4
parent    297b0c5be3b6e08890cbd7149313408847e81715
drm/i915: Invalidate the to-ring, flush the old-ring when updating domains
When the object has been written to by the GPU, it remains on the ring until its
flush has been retired. However, when the object is moving to a new ring and the
associated cache needs to be invalidated, we need to perform the flush on the
target ring, not the one it came from (which is NULL in the reported case, so
the flush was entirely absent).

Reported-by: Peter Clifton <pcjc2@cam.ac.uk>
Reported-and-tested-by: Alexey Fisher <bug-track@fisher-privat.net>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Diffstat (limited to 'drivers/gpu/drm')
 drivers/gpu/drm/i915/i915_gem.c | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 9290f02215c..e7f27a5b89d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3078,7 +3078,8 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
  *	drm_agp_chipset_flush
  */
 static void
-i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
+i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
+				  struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = obj->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3132,8 +3133,10 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
 
 	dev->invalidate_domains |= invalidate_domains;
 	dev->flush_domains |= flush_domains;
-	if (obj_priv->ring)
+	if (flush_domains & I915_GEM_GPU_DOMAINS)
 		dev_priv->mm.flush_rings |= obj_priv->ring->id;
+	if (invalidate_domains & I915_GEM_GPU_DOMAINS)
+		dev_priv->mm.flush_rings |= ring->id;
 
 	trace_i915_gem_object_change_domain(obj,
 					    old_read_domains,
@@ -3765,7 +3768,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		struct drm_gem_object *obj = object_list[i];
 
 		/* Compute new gpu domains and update invalidate/flush */
-		i915_gem_object_set_to_gpu_domain(obj);
+		i915_gem_object_set_to_gpu_domain(obj, ring);
 	}
 
 	if (dev->invalidate_domains | dev->flush_domains) {
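For readers who want the gist of the fix outside the driver, below is a minimal
standalone sketch of the corrected flush_rings bookkeeping. The types and names
here (struct ring, struct gem_object, update_flush_rings, GPU_DOMAINS) are
simplified stand-ins invented for illustration, not the real i915 structures;
only the two-branch logic mirrors the patch.

#include <stdio.h>

#define GPU_DOMAINS 0x3e	/* stand-in for I915_GEM_GPU_DOMAINS */

struct ring {
	unsigned int id;	/* bit identifying the ring */
};

struct gem_object {
	struct ring *ring;	/* ring with a pending write, may be NULL */
};

/*
 * Accumulate which rings need flushing when @obj is moved to @target:
 * a pending write drains on the ring it was issued on, while a cache
 * invalidation must be performed on the ring the object is moving to.
 */
static void update_flush_rings(const struct gem_object *obj,
			       const struct ring *target,
			       unsigned int invalidate_domains,
			       unsigned int flush_domains,
			       unsigned int *flush_rings)
{
	if (flush_domains & GPU_DOMAINS)	/* flush the old ring */
		*flush_rings |= obj->ring->id;
	if (invalidate_domains & GPU_DOMAINS)	/* invalidate the target */
		*flush_rings |= target->id;
}

int main(void)
{
	struct ring blt = { .id = 0x4 };
	struct gem_object obj = { .ring = NULL };	/* the reported case */
	unsigned int flush_rings = 0;

	/* No pending write, so nothing to flush from an old ring, but the
	 * caches on the target ring still need invalidating. */
	update_flush_rings(&obj, &blt, GPU_DOMAINS, 0, &flush_rings);
	printf("flush_rings = %#x\n", flush_rings);	/* 0x4, not 0 */
	return 0;
}

Before the fix, the bookkeeping tested only the object's old ring, so in the
case above (obj->ring == NULL) flush_rings stayed 0 and the invalidation on the
target ring was skipped entirely.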