author     Eric Anholt <eric@anholt.net>        2009-02-19 17:40:50 -0500
committer  Dave Airlie <airlied@redhat.com>     2009-02-22 19:06:15 -0500
commit     8b0e378a20e48c691d374f39d8b0596e63598cfc (patch)
tree       7aa600916495251d2afb6c9e42515b281112ee37
parent     683fdc5fe5fec5c4f27eb58a8781f212f50c8e01 (diff)
drm/i915: Cut two args to set_to_gpu_domain that confused this tricky path.
While not strictly required, it helped while thinking about the following
change. This change should be invariant.

Signed-off-by: Eric Anholt <eric@anholt.net>
Signed-off-by: Dave Airlie <airlied@redhat.com>
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 38
1 file changed, 16 insertions(+), 22 deletions(-)
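The patch drops the read_domains/write_domain parameters because the only caller, i915_gem_execbuffer, was already passing obj->pending_read_domains and obj->pending_write_domain; the function now reads those fields off the object directly. The following standalone sketch illustrates the shape of the change only: it is not kernel code, and the struct name, domain bit values, shortened function name, and simplified update logic are all placeholders, while the pending_read_domains/pending_write_domain field names come from the patch itself.

#include <stdint.h>
#include <stdio.h>

#define GEM_DOMAIN_CPU    (1u << 0)   /* placeholder domain bits */
#define GEM_DOMAIN_RENDER (1u << 1)

struct gem_object {
	uint32_t read_domains;
	uint32_t write_domain;
	uint32_t pending_read_domains;   /* filled in by execbuffer before the call */
	uint32_t pending_write_domain;
};

/* New-style signature: the two extra arguments are gone; the pending
 * domains are read from the object instead. */
static void set_to_gpu_domain(struct gem_object *obj)
{
	/* If nothing new will be written, keep the old read domains too. */
	if (obj->pending_write_domain == 0)
		obj->pending_read_domains |= obj->read_domains;

	/* Keep the old write domain unless a new one is pending (the real
	 * code also considers flush_domains here). */
	if (obj->pending_write_domain != 0)
		obj->write_domain = obj->pending_write_domain;
	obj->read_domains = obj->pending_read_domains;
}

int main(void)
{
	struct gem_object obj = {
		.read_domains = GEM_DOMAIN_CPU,
		.pending_read_domains = GEM_DOMAIN_RENDER,
		.pending_write_domain = 0,
	};

	/* Old call shape: set_to_gpu_domain(&obj, read_domains, write_domain); */
	set_to_gpu_domain(&obj);          /* new call shape */

	printf("read %#x write %#x\n",
	       (unsigned)obj.read_domains, (unsigned)obj.write_domain);
	return 0;
}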
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ac534c9a2f81..02ef50d512d6 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -34,10 +34,6 @@
 
 #define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
 
-static void
-i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
-                                  uint32_t read_domains,
-                                  uint32_t write_domain);
 static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
 static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
 static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
@@ -2021,30 +2017,28 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
  * drm_agp_chipset_flush
  */
 static void
-i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
-                                  uint32_t read_domains,
-                                  uint32_t write_domain)
+i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 	uint32_t invalidate_domains = 0;
 	uint32_t flush_domains = 0;
 
-	BUG_ON(read_domains & I915_GEM_DOMAIN_CPU);
-	BUG_ON(write_domain == I915_GEM_DOMAIN_CPU);
+	BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
+	BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);
 
 #if WATCH_BUF
 	DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
 		 __func__, obj,
-		 obj->read_domains, read_domains,
-		 obj->write_domain, write_domain);
+		 obj->read_domains, obj->pending_read_domains,
+		 obj->write_domain, obj->pending_write_domain);
 #endif
 	/*
 	 * If the object isn't moving to a new write domain,
 	 * let the object stay in multiple read domains
 	 */
-	if (write_domain == 0)
-		read_domains |= obj->read_domains;
+	if (obj->pending_write_domain == 0)
+		obj->pending_read_domains |= obj->read_domains;
 	else
 		obj_priv->dirty = 1;
 
@@ -2054,15 +2048,17 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
 	 * any read domains which differ from the old
 	 * write domain
 	 */
-	if (obj->write_domain && obj->write_domain != read_domains) {
+	if (obj->write_domain &&
+	    obj->write_domain != obj->pending_read_domains) {
 		flush_domains |= obj->write_domain;
-		invalidate_domains |= read_domains & ~obj->write_domain;
+		invalidate_domains |=
+			obj->pending_read_domains & ~obj->write_domain;
 	}
 	/*
 	 * Invalidate any read caches which may have
 	 * stale data. That is, any new read domains.
 	 */
-	invalidate_domains |= read_domains & ~obj->read_domains;
+	invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
 	if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
 #if WATCH_BUF
 		DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
@@ -2071,9 +2067,9 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
 		i915_gem_clflush_object(obj);
 	}
 
-	if ((write_domain | flush_domains) != 0)
-		obj->write_domain = write_domain;
-	obj->read_domains = read_domains;
+	if ((obj->pending_write_domain | flush_domains) != 0)
+		obj->write_domain = obj->pending_write_domain;
+	obj->read_domains = obj->pending_read_domains;
 
 	dev->invalidate_domains |= invalidate_domains;
 	dev->flush_domains |= flush_domains;
@@ -2583,9 +2579,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 		struct drm_gem_object *obj = object_list[i];
 
 		/* Compute new gpu domains and update invalidate/flush */
-		i915_gem_object_set_to_gpu_domain(obj,
-						  obj->pending_read_domains,
-						  obj->pending_write_domain);
+		i915_gem_object_set_to_gpu_domain(obj);
 	}
 
 	i915_verify_inactive(dev, __FILE__, __LINE__);