author		Keith Packard <keithp@keithp.com>	2008-11-21 02:23:03 -0500
committer	Dave Airlie <airlied@redhat.com>	2008-12-03 20:22:02 -0500
commit		646f0f6e43bf6628b1f0f8ca6c0227ce72e8ef3c
tree		78182c7c580bf961c0e8f31accf573eae908ca7b /drivers
parent		c0d90829288942fa06d7483f9e84059a64605da5
drm/i915: Move the execbuffer domain computations together
This eliminates the dev_set_domain function and just inlines it where it's used, with the goal of moving the manipulation and use of invalidate_domains and flush_domains closer together. This also avoids calling add_request unless some domain has been flushed.

Signed-off-by: Keith Packard <keithp@keithp.com>
Signed-off-by: Eric Anholt <eric@anholt.net>
Signed-off-by: Dave Airlie <airlied@redhat.com>
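For orientation, the consolidated flow in i915_gem_execbuffer() after this patch can be sketched roughly as follows (a condensed sketch assembled from the hunks below; error handling, locking, and unrelated steps are omitted, so this is not the verbatim kernel code):

	/* Accumulate flush/invalidate domains while computing per-object domains */
	dev->invalidate_domains = 0;
	dev->flush_domains = 0;

	for (i = 0; i < args->buffer_count; i++)
		i915_gem_object_set_to_gpu_domain(object_list[i],
						  object_list[i]->pending_read_domains,
						  object_list[i]->pending_write_domain);

	/* Flush/invalidate once for the whole batch; emit a request only if
	 * something was actually flushed.
	 */
	if (dev->invalidate_domains | dev->flush_domains) {
		i915_gem_flush(dev, dev->invalidate_domains, dev->flush_domains);
		if (dev->flush_domains)
			(void)i915_add_request(dev, dev->flush_domains);
	}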
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c	65
1 file changed, 21 insertions, 44 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 50c75327d567..c171a2d93bb6 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1647,38 +1647,6 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
 }
 
 /**
- * Once all of the objects have been set in the proper domain,
- * perform the necessary flush and invalidate operations.
- *
- * Returns the write domains flushed, for use in flush tracking.
- */
-static uint32_t
-i915_gem_dev_set_domain(struct drm_device *dev)
-{
-	uint32_t flush_domains = dev->flush_domains;
-
-	/*
-	 * Now that all the buffers are synced to the proper domains,
-	 * flush and invalidate the collected domains
-	 */
-	if (dev->invalidate_domains | dev->flush_domains) {
-#if WATCH_EXEC
-		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
-			 __func__,
-			 dev->invalidate_domains,
-			 dev->flush_domains);
-#endif
-		i915_gem_flush(dev,
-			       dev->invalidate_domains,
-			       dev->flush_domains);
-		dev->invalidate_domains = 0;
-		dev->flush_domains = 0;
-	}
-
-	return flush_domains;
-}
-
-/**
  * Pin an object to the GTT and evaluate the relocations landing in it.
  */
 static int
@@ -2002,13 +1970,6 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 		return -EBUSY;
 	}
 
-	/* Zero the gloabl flush/invalidate flags. These
-	 * will be modified as each object is bound to the
-	 * gtt
-	 */
-	dev->invalidate_domains = 0;
-	dev->flush_domains = 0;
-
 	/* Look up object handles and perform the relocations */
 	for (i = 0; i < args->buffer_count; i++) {
 		object_list[i] = drm_gem_object_lookup(dev, file_priv,
@@ -2039,10 +2000,17 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 
 	i915_verify_inactive(dev, __FILE__, __LINE__);
 
+	/* Zero the global flush/invalidate flags. These
+	 * will be modified as new domains are computed
+	 * for each object
+	 */
+	dev->invalidate_domains = 0;
+	dev->flush_domains = 0;
+
 	for (i = 0; i < args->buffer_count; i++) {
 		struct drm_gem_object *obj = object_list[i];
 
-		/* Compute new gpu domains and update invalidate/flushing */
+		/* Compute new gpu domains and update invalidate/flush */
 		i915_gem_object_set_to_gpu_domain(obj,
 						  obj->pending_read_domains,
 						  obj->pending_write_domain);
@@ -2050,8 +2018,19 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 
 	i915_verify_inactive(dev, __FILE__, __LINE__);
 
-	/* Flush/invalidate caches and chipset buffer */
-	flush_domains = i915_gem_dev_set_domain(dev);
+	if (dev->invalidate_domains | dev->flush_domains) {
+#if WATCH_EXEC
+		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
+			 __func__,
+			 dev->invalidate_domains,
+			 dev->flush_domains);
+#endif
+		i915_gem_flush(dev,
+			       dev->invalidate_domains,
+			       dev->flush_domains);
+		if (dev->flush_domains)
+			(void)i915_add_request(dev, dev->flush_domains);
+	}
 
 	i915_verify_inactive(dev, __FILE__, __LINE__);
 
@@ -2071,8 +2050,6 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 			       ~0);
 #endif
 
-	(void)i915_add_request(dev, flush_domains);
-
 	/* Exec the batchbuffer */
 	ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
 	if (ret) {