author    Eric Anholt <eric@anholt.net>    2009-02-19 17:54:51 -0500
committer Dave Airlie <airlied@redhat.com> 2009-02-22 19:06:19 -0500
commit    efbeed96f7e20783b22d9529ef536b61f7ea8637 (patch)
tree      0b087388f9fd5ba506c1d985f53dd820ff73583a /drivers/gpu/drm/i915
parent    8b0e378a20e48c691d374f39d8b0596e63598cfc (diff)
drm/i915: Don't let a device flush to prepare buffers clear new write_domains.
The problem was that object_set_to_gpu_domain would set the new write_domains that are getting set by this batchbuffer, then the accumulated flushes required for all the objects in preparation for this batchbuffer were posted, and the brand new write domain would get cleared by the flush being posted. Instead, hang on to the new (or old, if we're not changing it) value and set it after the flush is queued.

Results from this noticeably included conformance test failures from reads shortly after writes (where the new write domain had been lost and thus not flushed and waited on), but it is also a suspected cause of hangs in some apps when a write domain is lost on a buffer that gets reused for instruction or command state.

Signed-off-by: Eric Anholt <eric@anholt.net>
Signed-off-by: Dave Airlie <airlied@redhat.com>
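To make the ordering problem concrete, here is a minimal standalone userspace sketch (not the driver code itself; struct fake_obj, emit_accumulated_flush(), and the 0x40 domain value are simplified stand-ins invented for illustration, not the i915 API). It shows how applying the new write domain before the accumulated flush loses it, while deferring the update until after the flush, as this patch does, keeps it:

#include <stdio.h>

struct fake_obj {
	unsigned int write_domain;          /* domain currently owning writes */
	unsigned int pending_write_domain;  /* domain requested by this batch */
};

/* Stand-in for the flush execbuffer emits for all accumulated domain
 * changes; in the real driver that flush clears objects' write_domains. */
static void emit_accumulated_flush(struct fake_obj *obj)
{
	obj->write_domain = 0;
}

int main(void)
{
	struct fake_obj obj = { .write_domain = 0, .pending_write_domain = 0x40 };

	/* Broken ordering: apply the new write domain first ... */
	obj.write_domain = obj.pending_write_domain;
	/* ... then the flush for the previous domains wipes it out. */
	emit_accumulated_flush(&obj);
	printf("broken ordering: write_domain = 0x%x (new domain lost)\n",
	       obj.write_domain);

	/* Fixed ordering (what the patch does): emit the flush first,
	 * then apply pending_write_domain afterwards. */
	obj.pending_write_domain = 0x40;
	emit_accumulated_flush(&obj);
	obj.write_domain = obj.pending_write_domain;
	printf("fixed ordering:  write_domain = 0x%x (new domain kept)\n",
	       obj.write_domain);

	return 0;
}

In the real driver the deferred update is the per-object loop added to i915_gem_execbuffer() in the diff below.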
Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c	16
1 file changed, 14 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 02ef50d512d6..0f50574076b1 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2067,8 +2067,14 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
 		i915_gem_clflush_object(obj);
 	}
 
-	if ((obj->pending_write_domain | flush_domains) != 0)
-		obj->write_domain = obj->pending_write_domain;
+	/* The actual obj->write_domain will be updated with
+	 * pending_write_domain after we emit the accumulated flush for all
+	 * of our domain changes in execbuffers (which clears objects'
+	 * write_domains).  So if we have a current write domain that we
+	 * aren't changing, set pending_write_domain to that.
+	 */
+	if (flush_domains == 0 && obj->pending_write_domain == 0)
+		obj->pending_write_domain = obj->write_domain;
 	obj->read_domains = obj->pending_read_domains;
 
 	dev->invalidate_domains |= invalidate_domains;
@@ -2598,6 +2604,12 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 		(void)i915_add_request(dev, dev->flush_domains);
 	}
 
+	for (i = 0; i < args->buffer_count; i++) {
+		struct drm_gem_object *obj = object_list[i];
+
+		obj->write_domain = obj->pending_write_domain;
+	}
+
 	i915_verify_inactive(dev, __FILE__, __LINE__);
 
 #if WATCH_COHERENCY