author     Chris Wilson <chris@chris-wilson.co.uk>    2011-01-04 13:42:07 -0500
committer  Chris Wilson <chris@chris-wilson.co.uk>    2011-01-11 15:42:53 -0500
commit     63256ec5347fb2344a42adbae732b90603c92f35
tree       5b018e93f38f9e90f3b07beeaac4af08122c5876
parent     759010728b1323aec03c5baae13fde8f76e44a99
drm/i915: Enforce write ordering through the GTT
We need to ensure that writes through the GTT land before any modification
to the MMIO registers and so must impose a mandatory write barrier when
flushing the GTT domain. This was revealed by relaxing the write ordering
by experimentally mapping the registers and the GATT as write-combining.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
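For illustration only, a minimal sketch (not part of this patch) of the ordering
pattern the barrier enforces. The helper name, both pointers, and the doorbell
value are hypothetical; the point is simply that a write going through a
write-combined GTT mapping must be flushed with wmb() before the MMIO write
that makes the device act on it:

	#include <linux/io.h>	/* iowrite32(); wmb() comes from the arch barrier headers */

	/* Hypothetical example: push data through the GTT, then notify the
	 * device via an MMIO register.  Without the wmb(), the GTT write could
	 * still be sitting in a write-combining buffer when the register write
	 * reaches the device.
	 */
	static void example_write_then_notify(u32 __iomem *gtt_vaddr,
					      u32 __iomem *mmio_doorbell,
					      u32 data)
	{
		iowrite32(data, gtt_vaddr);	/* lands via the GTT (may be WC) */
		wmb();				/* order the GTT write before MMIO */
		iowrite32(1, mmio_doorbell);	/* device now observes the data */
	}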
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c             14
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c   3
2 files changed, 16 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index c79c0b62ef60..f9c093c08d58 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2393,6 +2393,12 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
 		obj->last_fenced_ring = NULL;
 	}
 
+	/* Ensure that all CPU reads are completed before installing a fence
+	 * and all writes before removing the fence.
+	 */
+	if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
+		mb();
+
 	return 0;
 }
 
@@ -2833,10 +2839,16 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
 	if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
 		return;
 
 	/* No actual flushing is required for the GTT write domain. Writes
 	 * to it immediately go to main memory as far as we know, so there's
 	 * no chipset flush. It also doesn't land in render cache.
+	 *
+	 * However, we do have to enforce the order so that all writes through
+	 * the GTT land before any writes to the device, such as updates to
+	 * the GATT itself.
 	 */
+	wmb();
+
 	i915_gem_release_mmap(obj);
 
 	old_write_domain = obj->base.write_domain;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 61129e6759eb..0d42de42868c 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -725,6 +725,9 @@ i915_gem_execbuffer_flush(struct drm_device *dev,
 	if (flush_domains & I915_GEM_DOMAIN_CPU)
 		intel_gtt_chipset_flush();
 
+	if (flush_domains & I915_GEM_DOMAIN_GTT)
+		wmb();
+
 	if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
 		for (i = 0; i < I915_NUM_RINGS; i++)
 			if (flush_rings & (1 << i))