diff options
author | Chris Wilson <chris@chris-wilson.co.uk> | 2010-09-20 07:50:23 -0400 |
---|---|---|
committer | Chris Wilson <chris@chris-wilson.co.uk> | 2010-09-21 06:24:16 -0400 |
commit | c78ec30bba52754b9f21a899eac2e2f5a7486116 (patch) | |
tree | 2c8c394326e9d711407566ec003886f2617d47f0 /drivers/gpu | |
parent | 53640e1d07fb7dd5d14300dd94f4718eca33348e (diff) |
drm/i915: Merge ring flushing and lazy requests
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Diffstat (limited to 'drivers/gpu')
-rw-r--r-- | drivers/gpu/drm/i915/i915_drv.h | 1 | ||||
-rw-r--r-- | drivers/gpu/drm/i915/i915_gem.c | 53 | ||||
-rw-r--r-- | drivers/gpu/drm/i915/intel_display.c | 2 |
3 files changed, 24 insertions, 32 deletions
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 6e22be4f3585..37a44c80efd2 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -1003,6 +1003,7 @@ void i915_gem_reset_flushing_list(struct drm_device *dev); | |||
1003 | void i915_gem_reset_inactive_gpu_domains(struct drm_device *dev); | 1003 | void i915_gem_reset_inactive_gpu_domains(struct drm_device *dev); |
1004 | void i915_gem_clflush_object(struct drm_gem_object *obj); | 1004 | void i915_gem_clflush_object(struct drm_gem_object *obj); |
1005 | void i915_gem_flush_ring(struct drm_device *dev, | 1005 | void i915_gem_flush_ring(struct drm_device *dev, |
1006 | struct drm_file *file_priv, | ||
1006 | struct intel_ring_buffer *ring, | 1007 | struct intel_ring_buffer *ring, |
1007 | uint32_t invalidate_domains, | 1008 | uint32_t invalidate_domains, |
1008 | uint32_t flush_domains); | 1009 | uint32_t flush_domains); |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index a5d5751bad30..58baecc821a5 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -1910,16 +1910,23 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno, | |||
1910 | 1910 | ||
1911 | void | 1911 | void |
1912 | i915_gem_flush_ring(struct drm_device *dev, | 1912 | i915_gem_flush_ring(struct drm_device *dev, |
1913 | struct drm_file *file_priv, | ||
1913 | struct intel_ring_buffer *ring, | 1914 | struct intel_ring_buffer *ring, |
1914 | uint32_t invalidate_domains, | 1915 | uint32_t invalidate_domains, |
1915 | uint32_t flush_domains) | 1916 | uint32_t flush_domains) |
1916 | { | 1917 | { |
1917 | ring->flush(dev, ring, invalidate_domains, flush_domains); | 1918 | ring->flush(dev, ring, invalidate_domains, flush_domains); |
1918 | i915_gem_process_flushing_list(dev, flush_domains, ring); | 1919 | i915_gem_process_flushing_list(dev, flush_domains, ring); |
1920 | |||
1921 | if (ring->outstanding_lazy_request) { | ||
1922 | (void)i915_add_request(dev, file_priv, NULL, ring); | ||
1923 | ring->outstanding_lazy_request = false; | ||
1924 | } | ||
1919 | } | 1925 | } |
1920 | 1926 | ||
1921 | static void | 1927 | static void |
1922 | i915_gem_flush(struct drm_device *dev, | 1928 | i915_gem_flush(struct drm_device *dev, |
1929 | struct drm_file *file_priv, | ||
1923 | uint32_t invalidate_domains, | 1930 | uint32_t invalidate_domains, |
1924 | uint32_t flush_domains, | 1931 | uint32_t flush_domains, |
1925 | uint32_t flush_rings) | 1932 | uint32_t flush_rings) |
@@ -1931,11 +1938,11 @@ i915_gem_flush(struct drm_device *dev, | |||
1931 | 1938 | ||
1932 | if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) { | 1939 | if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) { |
1933 | if (flush_rings & RING_RENDER) | 1940 | if (flush_rings & RING_RENDER) |
1934 | i915_gem_flush_ring(dev, | 1941 | i915_gem_flush_ring(dev, file_priv, |
1935 | &dev_priv->render_ring, | 1942 | &dev_priv->render_ring, |
1936 | invalidate_domains, flush_domains); | 1943 | invalidate_domains, flush_domains); |
1937 | if (flush_rings & RING_BSD) | 1944 | if (flush_rings & RING_BSD) |
1938 | i915_gem_flush_ring(dev, | 1945 | i915_gem_flush_ring(dev, file_priv, |
1939 | &dev_priv->bsd_ring, | 1946 | &dev_priv->bsd_ring, |
1940 | invalidate_domains, flush_domains); | 1947 | invalidate_domains, flush_domains); |
1941 | } | 1948 | } |
@@ -2054,6 +2061,7 @@ i915_gpu_idle(struct drm_device *dev) | |||
2054 | { | 2061 | { |
2055 | drm_i915_private_t *dev_priv = dev->dev_private; | 2062 | drm_i915_private_t *dev_priv = dev->dev_private; |
2056 | bool lists_empty; | 2063 | bool lists_empty; |
2064 | u32 seqno; | ||
2057 | int ret; | 2065 | int ret; |
2058 | 2066 | ||
2059 | lists_empty = (list_empty(&dev_priv->mm.flushing_list) && | 2067 | lists_empty = (list_empty(&dev_priv->mm.flushing_list) && |
@@ -2064,24 +2072,18 @@ i915_gpu_idle(struct drm_device *dev) | |||
2064 | return 0; | 2072 | return 0; |
2065 | 2073 | ||
2066 | /* Flush everything onto the inactive list. */ | 2074 | /* Flush everything onto the inactive list. */ |
2067 | i915_gem_flush_ring(dev, | 2075 | seqno = i915_gem_next_request_seqno(dev, &dev_priv->render_ring); |
2068 | &dev_priv->render_ring, | 2076 | i915_gem_flush_ring(dev, NULL, &dev_priv->render_ring, |
2069 | I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); | 2077 | I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); |
2070 | 2078 | ret = i915_wait_request(dev, seqno, &dev_priv->render_ring); | |
2071 | ret = i915_wait_request(dev, | ||
2072 | i915_gem_next_request_seqno(dev, &dev_priv->render_ring), | ||
2073 | &dev_priv->render_ring); | ||
2074 | if (ret) | 2079 | if (ret) |
2075 | return ret; | 2080 | return ret; |
2076 | 2081 | ||
2077 | if (HAS_BSD(dev)) { | 2082 | if (HAS_BSD(dev)) { |
2078 | i915_gem_flush_ring(dev, | 2083 | seqno = i915_gem_next_request_seqno(dev, &dev_priv->bsd_ring); |
2079 | &dev_priv->bsd_ring, | 2084 | i915_gem_flush_ring(dev, NULL, &dev_priv->bsd_ring, |
2080 | I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); | 2085 | I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); |
2081 | 2086 | ret = i915_wait_request(dev, seqno, &dev_priv->bsd_ring); | |
2082 | ret = i915_wait_request(dev, | ||
2083 | i915_gem_next_request_seqno(dev, &dev_priv->bsd_ring), | ||
2084 | &dev_priv->bsd_ring); | ||
2085 | if (ret) | 2087 | if (ret) |
2086 | return ret; | 2088 | return ret; |
2087 | } | 2089 | } |
@@ -2651,7 +2653,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj, | |||
2651 | 2653 | ||
2652 | /* Queue the GPU write cache flushing we need. */ | 2654 | /* Queue the GPU write cache flushing we need. */ |
2653 | old_write_domain = obj->write_domain; | 2655 | old_write_domain = obj->write_domain; |
2654 | i915_gem_flush_ring(dev, | 2656 | i915_gem_flush_ring(dev, NULL, |
2655 | to_intel_bo(obj)->ring, | 2657 | to_intel_bo(obj)->ring, |
2656 | 0, obj->write_domain); | 2658 | 0, obj->write_domain); |
2657 | BUG_ON(obj->write_domain); | 2659 | BUG_ON(obj->write_domain); |
@@ -2780,7 +2782,7 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj, | |||
2780 | i915_gem_object_flush_cpu_write_domain(obj); | 2782 | i915_gem_object_flush_cpu_write_domain(obj); |
2781 | 2783 | ||
2782 | old_read_domains = obj->read_domains; | 2784 | old_read_domains = obj->read_domains; |
2783 | obj->read_domains = I915_GEM_DOMAIN_GTT; | 2785 | obj->read_domains |= I915_GEM_DOMAIN_GTT; |
2784 | 2786 | ||
2785 | trace_i915_gem_object_change_domain(obj, | 2787 | trace_i915_gem_object_change_domain(obj, |
2786 | old_read_domains, | 2788 | old_read_domains, |
@@ -2837,7 +2839,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) | |||
2837 | * need to be invalidated at next use. | 2839 | * need to be invalidated at next use. |
2838 | */ | 2840 | */ |
2839 | if (write) { | 2841 | if (write) { |
2840 | obj->read_domains &= I915_GEM_DOMAIN_CPU; | 2842 | obj->read_domains = I915_GEM_DOMAIN_CPU; |
2841 | obj->write_domain = I915_GEM_DOMAIN_CPU; | 2843 | obj->write_domain = I915_GEM_DOMAIN_CPU; |
2842 | } | 2844 | } |
2843 | 2845 | ||
@@ -3762,21 +3764,12 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
3762 | dev->invalidate_domains, | 3764 | dev->invalidate_domains, |
3763 | dev->flush_domains); | 3765 | dev->flush_domains); |
3764 | #endif | 3766 | #endif |
3765 | i915_gem_flush(dev, | 3767 | i915_gem_flush(dev, file_priv, |
3766 | dev->invalidate_domains, | 3768 | dev->invalidate_domains, |
3767 | dev->flush_domains, | 3769 | dev->flush_domains, |
3768 | dev_priv->mm.flush_rings); | 3770 | dev_priv->mm.flush_rings); |
3769 | } | 3771 | } |
3770 | 3772 | ||
3771 | if (dev_priv->render_ring.outstanding_lazy_request) { | ||
3772 | (void)i915_add_request(dev, file_priv, NULL, &dev_priv->render_ring); | ||
3773 | dev_priv->render_ring.outstanding_lazy_request = false; | ||
3774 | } | ||
3775 | if (dev_priv->bsd_ring.outstanding_lazy_request) { | ||
3776 | (void)i915_add_request(dev, file_priv, NULL, &dev_priv->bsd_ring); | ||
3777 | dev_priv->bsd_ring.outstanding_lazy_request = false; | ||
3778 | } | ||
3779 | |||
3780 | for (i = 0; i < args->buffer_count; i++) { | 3773 | for (i = 0; i < args->buffer_count; i++) { |
3781 | struct drm_gem_object *obj = object_list[i]; | 3774 | struct drm_gem_object *obj = object_list[i]; |
3782 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 3775 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
@@ -4232,12 +4225,10 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, | |||
4232 | * use this buffer rather sooner than later, so issuing the required | 4225 | * use this buffer rather sooner than later, so issuing the required |
4233 | * flush earlier is beneficial. | 4226 | * flush earlier is beneficial. |
4234 | */ | 4227 | */ |
4235 | if (obj->write_domain & I915_GEM_GPU_DOMAINS) { | 4228 | if (obj->write_domain & I915_GEM_GPU_DOMAINS) |
4236 | i915_gem_flush_ring(dev, | 4229 | i915_gem_flush_ring(dev, file_priv, |
4237 | obj_priv->ring, | 4230 | obj_priv->ring, |
4238 | 0, obj->write_domain); | 4231 | 0, obj->write_domain); |
4239 | (void)i915_add_request(dev, file_priv, NULL, obj_priv->ring); | ||
4240 | } | ||
4241 | 4232 | ||
4242 | /* Update the active list for the hardware's current position. | 4233 | /* Update the active list for the hardware's current position. |
4243 | * Otherwise this only updates on a delayed timer or when irqs | 4234 | * Otherwise this only updates on a delayed timer or when irqs |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 0505ddb76a10..791374c888da 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -5058,7 +5058,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
5058 | 5058 | ||
5059 | /* Schedule the pipelined flush */ | 5059 | /* Schedule the pipelined flush */ |
5060 | if (was_dirty) | 5060 | if (was_dirty) |
5061 | i915_gem_flush_ring(dev, obj_priv->ring, 0, was_dirty); | 5061 | i915_gem_flush_ring(dev, NULL, obj_priv->ring, 0, was_dirty); |
5062 | 5062 | ||
5063 | if (IS_GEN3(dev) || IS_GEN2(dev)) { | 5063 | if (IS_GEN3(dev) || IS_GEN2(dev)) { |
5064 | u32 flip_mask; | 5064 | u32 flip_mask; |