author		Chris Wilson <chris@chris-wilson.co.uk>		2011-01-07 12:09:48 -0500
committer	Chris Wilson <chris@chris-wilson.co.uk>		2011-01-11 15:44:50 -0500
commit		882417851a0f2e09e110038a13e88e9b5a100800 (patch)
tree		6c96e700acb948aabbc7857f823bb8ad114b3b44
parent		776ad8062bb77697b8728a9794e3a394b28cf885 (diff)
drm/i915: Propagate error from flushing the ring
... in order to avoid a BUG() and potential unbounded waits.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
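
The change applies the standard kernel error-propagation idiom throughout: i915_gem_flush_ring() goes from void to an int annotated __must_check, and every caller either handles or returns the error instead of asserting the post-condition with BUG(). A minimal standalone sketch of the idiom follows (do_flush() and do_idle() are illustrative names, not part of the i915 API; in the kernel tree __must_check comes from <linux/compiler.h>, while this snippet defines it locally and compiles on its own with gcc -c):

	#include <errno.h>

	#define __must_check __attribute__((warn_unused_result))

	/* Before: a void flush cannot report failure, so callers assert
	 * success with BUG() and may wait forever on work that was never
	 * submitted.  After: failure comes back as a -errno value. */
	__must_check int do_flush(unsigned int domains)
	{
		if (domains == 0)
			return -EINVAL;	/* illustrative failure mode */
		/* ... emit the flush commands here ... */
		return 0;
	}

	int do_idle(unsigned int domains)
	{
		int ret;

		ret = do_flush(domains);
		if (ret)
			return ret;	/* propagate, don't BUG() */

		/* ... only now is it safe to wait for completion ... */
		return 0;
	}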
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.h			  8
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c			102
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_execbuffer.c	 28
3 files changed, 90 insertions, 48 deletions
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 455260067ff7..3e78314514a2 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1086,10 +1086,10 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 			    struct drm_file *file_priv);
 void i915_gem_load(struct drm_device *dev);
 int i915_gem_init_object(struct drm_gem_object *obj);
-void i915_gem_flush_ring(struct drm_device *dev,
-			 struct intel_ring_buffer *ring,
-			 uint32_t invalidate_domains,
-			 uint32_t flush_domains);
+int __must_check i915_gem_flush_ring(struct drm_device *dev,
+				     struct intel_ring_buffer *ring,
+				     uint32_t invalidate_domains,
+				     uint32_t flush_domains);
 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 						  size_t size);
 void i915_gem_free_object(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 07b62449b9e1..2873d068eb1f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -35,18 +35,18 @@
 #include <linux/swap.h>
 #include <linux/pci.h>
 
-static void i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
+static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
-static int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
-					     bool write);
-static int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
-						      uint64_t offset,
-						      uint64_t size);
+static __must_check int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
+							  bool write);
+static __must_check int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
+								   uint64_t offset,
+								   uint64_t size);
 static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj);
-static int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
-				       unsigned alignment,
-				       bool map_and_fenceable);
+static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
+						    unsigned alignment,
+						    bool map_and_fenceable);
 static void i915_gem_clear_fence_reg(struct drm_device *dev,
 				     struct drm_i915_fence_reg *reg);
 static int i915_gem_phys_pwrite(struct drm_device *dev,
@@ -2142,25 +2142,37 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 	return ret;
 }
 
-void
+int
 i915_gem_flush_ring(struct drm_device *dev,
 		    struct intel_ring_buffer *ring,
 		    uint32_t invalidate_domains,
 		    uint32_t flush_domains)
 {
-	if (ring->flush(ring, invalidate_domains, flush_domains) == 0)
-		i915_gem_process_flushing_list(dev, flush_domains, ring);
+	int ret;
+
+	ret = ring->flush(ring, invalidate_domains, flush_domains);
+	if (ret)
+		return ret;
+
+	i915_gem_process_flushing_list(dev, flush_domains, ring);
+	return 0;
 }
 
 static int i915_ring_idle(struct drm_device *dev,
 			  struct intel_ring_buffer *ring)
 {
+	int ret;
+
 	if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
 		return 0;
 
-	if (!list_empty(&ring->gpu_write_list))
-		i915_gem_flush_ring(dev, ring,
-				    I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+	if (!list_empty(&ring->gpu_write_list)) {
+		ret = i915_gem_flush_ring(dev, ring,
+					  I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+		if (ret)
+			return ret;
+	}
+
 	return i915_wait_request(dev,
 				 i915_gem_next_request_seqno(dev, ring),
 				 ring);
@@ -2370,10 +2382,13 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
 	int ret;
 
 	if (obj->fenced_gpu_access) {
-		if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
-			i915_gem_flush_ring(obj->base.dev,
-					    obj->last_fenced_ring,
-					    0, obj->base.write_domain);
+		if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
+			ret = i915_gem_flush_ring(obj->base.dev,
+						  obj->last_fenced_ring,
+						  0, obj->base.write_domain);
+			if (ret)
+				return ret;
+		}
 
 		obj->fenced_gpu_access = false;
 	}
@@ -2529,9 +2544,12 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
 			return ret;
 	} else if (obj->tiling_changed) {
 		if (obj->fenced_gpu_access) {
-			if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
-				i915_gem_flush_ring(obj->base.dev, obj->ring,
-						    0, obj->base.write_domain);
+			if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
+				ret = i915_gem_flush_ring(obj->base.dev, obj->ring,
+							  0, obj->base.write_domain);
+				if (ret)
+					return ret;
+			}
 
 			obj->fenced_gpu_access = false;
 		}
@@ -2817,17 +2835,16 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj)
 }
 
 /** Flushes any GPU write domain for the object if it's dirty. */
-static void
+static int
 i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
 {
 	struct drm_device *dev = obj->base.dev;
 
 	if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
-		return;
+		return 0;
 
 	/* Queue the GPU write cache flushing we need. */
-	i915_gem_flush_ring(dev, obj->ring, 0, obj->base.write_domain);
-	BUG_ON(obj->base.write_domain);
+	return i915_gem_flush_ring(dev, obj->ring, 0, obj->base.write_domain);
 }
 
 /** Flushes the GTT write domain for the object if it's dirty. */
@@ -2894,7 +2911,10 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 	if (obj->gtt_space == NULL)
 		return -EINVAL;
 
-	i915_gem_object_flush_gpu_write_domain(obj);
+	ret = i915_gem_object_flush_gpu_write_domain(obj);
+	if (ret)
+		return ret;
+
 	if (obj->pending_gpu_write || write) {
 		ret = i915_gem_object_wait_rendering(obj, true);
 		if (ret)
@@ -2939,7 +2959,10 @@ i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
 	if (obj->gtt_space == NULL)
 		return -EINVAL;
 
-	i915_gem_object_flush_gpu_write_domain(obj);
+	ret = i915_gem_object_flush_gpu_write_domain(obj);
+	if (ret)
+		return ret;
+
 
 	/* Currently, we are always called from an non-interruptible context. */
 	if (pipelined != obj->ring) {
@@ -2964,12 +2987,17 @@ int
 i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
 			  bool interruptible)
 {
+	int ret;
+
 	if (!obj->active)
 		return 0;
 
-	if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
-		i915_gem_flush_ring(obj->base.dev, obj->ring,
-				    0, obj->base.write_domain);
+	if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
+		ret = i915_gem_flush_ring(obj->base.dev, obj->ring,
+					  0, obj->base.write_domain);
+		if (ret)
+			return ret;
+	}
 
 	return i915_gem_object_wait_rendering(obj, interruptible);
 }
@@ -2986,7 +3014,10 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
 	uint32_t old_write_domain, old_read_domains;
 	int ret;
 
-	i915_gem_object_flush_gpu_write_domain(obj);
+	ret = i915_gem_object_flush_gpu_write_domain(obj);
+	if (ret)
+		return ret;
+
 	ret = i915_gem_object_wait_rendering(obj, true);
 	if (ret)
 		return ret;
@@ -3081,7 +3112,10 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
 	if (offset == 0 && size == obj->base.size)
 		return i915_gem_object_set_to_cpu_domain(obj, 0);
 
-	i915_gem_object_flush_gpu_write_domain(obj);
+	ret = i915_gem_object_flush_gpu_write_domain(obj);
+	if (ret)
+		return ret;
+
 	ret = i915_gem_object_wait_rendering(obj, true);
 	if (ret)
 		return ret;
@@ -3374,8 +3408,8 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	 * flush earlier is beneficial.
 	 */
 	if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
-		i915_gem_flush_ring(dev, obj->ring,
-				    0, obj->base.write_domain);
+		ret = i915_gem_flush_ring(dev, obj->ring,
+					  0, obj->base.write_domain);
 	} else if (obj->ring->outstanding_lazy_request ==
 		   obj->last_rendering_seqno) {
 		struct drm_i915_gem_request *request;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 1b78b66dd77e..97d5fbd8ea13 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -713,14 +713,14 @@ err:
 	return ret;
 }
 
-static void
+static int
 i915_gem_execbuffer_flush(struct drm_device *dev,
 			  uint32_t invalidate_domains,
 			  uint32_t flush_domains,
 			  uint32_t flush_rings)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	int i;
+	int i, ret;
 
 	if (flush_domains & I915_GEM_DOMAIN_CPU)
 		intel_gtt_chipset_flush();
@@ -730,11 +730,17 @@ i915_gem_execbuffer_flush(struct drm_device *dev,
 
 	if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
 		for (i = 0; i < I915_NUM_RINGS; i++)
-			if (flush_rings & (1 << i))
-				i915_gem_flush_ring(dev, &dev_priv->ring[i],
-						    invalidate_domains,
-						    flush_domains);
+			if (flush_rings & (1 << i)) {
+				ret = i915_gem_flush_ring(dev,
+							  &dev_priv->ring[i],
+							  invalidate_domains,
+							  flush_domains);
+				if (ret)
+					return ret;
+			}
 	}
+
+	return 0;
 }
 
 static int
@@ -798,10 +804,12 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
 				  cd.invalidate_domains,
 				  cd.flush_domains);
 #endif
-		i915_gem_execbuffer_flush(ring->dev,
-					  cd.invalidate_domains,
-					  cd.flush_domains,
-					  cd.flush_rings);
+		ret = i915_gem_execbuffer_flush(ring->dev,
+						cd.invalidate_domains,
+						cd.flush_domains,
+						cd.flush_rings);
+		if (ret)
+			return ret;
 	}
 
 	list_for_each_entry(obj, objects, exec_list) {
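
As a usage note on the i915_drv.h change: once the declaration carries __must_check, any caller that silently drops the return value gets a compile-time diagnostic, which is what keeps future call sites honest. A quick out-of-tree sketch of the mechanism (plain GCC, with __must_check defined locally; exact warning wording varies by compiler version):

	/* must_check_demo.c -- build with: gcc -c must_check_demo.c */
	#define __must_check __attribute__((warn_unused_result))

	__must_check int flush(void);

	void caller(void)
	{
		flush();	/* gcc warns: ignoring return value of 'flush',
				 * declared with attribute warn_unused_result */
	}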