Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c | 102
1 file changed, 68 insertions, 34 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 07b62449b9e1..2873d068eb1f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -35,18 +35,18 @@
 #include <linux/swap.h>
 #include <linux/pci.h>
 
-static void i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
+static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
-static int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
-					     bool write);
-static int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
-						      uint64_t offset,
-						      uint64_t size);
+static __must_check int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
+							  bool write);
+static __must_check int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
+								   uint64_t offset,
+								   uint64_t size);
 static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj);
-static int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
-				       unsigned alignment,
-				       bool map_and_fenceable);
+static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
+						    unsigned alignment,
+						    bool map_and_fenceable);
 static void i915_gem_clear_fence_reg(struct drm_device *dev,
 				     struct drm_i915_fence_reg *reg);
 static int i915_gem_phys_pwrite(struct drm_device *dev,
@@ -2142,25 +2142,37 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 	return ret;
 }
 
-void
+int
 i915_gem_flush_ring(struct drm_device *dev,
 		    struct intel_ring_buffer *ring,
 		    uint32_t invalidate_domains,
 		    uint32_t flush_domains)
 {
-	if (ring->flush(ring, invalidate_domains, flush_domains) == 0)
-		i915_gem_process_flushing_list(dev, flush_domains, ring);
+	int ret;
+
+	ret = ring->flush(ring, invalidate_domains, flush_domains);
+	if (ret)
+		return ret;
+
+	i915_gem_process_flushing_list(dev, flush_domains, ring);
+	return 0;
 }
 
 static int i915_ring_idle(struct drm_device *dev,
 			  struct intel_ring_buffer *ring)
 {
+	int ret;
+
 	if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
 		return 0;
 
-	if (!list_empty(&ring->gpu_write_list))
-		i915_gem_flush_ring(dev, ring,
-				    I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+	if (!list_empty(&ring->gpu_write_list)) {
+		ret = i915_gem_flush_ring(dev, ring,
+					  I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+		if (ret)
+			return ret;
+	}
+
 	return i915_wait_request(dev,
 				 i915_gem_next_request_seqno(dev, ring),
 				 ring);
@@ -2370,10 +2382,13 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
 	int ret;
 
 	if (obj->fenced_gpu_access) {
-		if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
-			i915_gem_flush_ring(obj->base.dev,
-					    obj->last_fenced_ring,
-					    0, obj->base.write_domain);
+		if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
+			ret = i915_gem_flush_ring(obj->base.dev,
+						  obj->last_fenced_ring,
+						  0, obj->base.write_domain);
+			if (ret)
+				return ret;
+		}
 
 		obj->fenced_gpu_access = false;
 	}
@@ -2529,9 +2544,12 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
 		return ret;
 	} else if (obj->tiling_changed) {
 		if (obj->fenced_gpu_access) {
-			if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
-				i915_gem_flush_ring(obj->base.dev, obj->ring,
-						    0, obj->base.write_domain);
+			if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
+				ret = i915_gem_flush_ring(obj->base.dev, obj->ring,
+							  0, obj->base.write_domain);
+				if (ret)
+					return ret;
+			}
 
 			obj->fenced_gpu_access = false;
 		}
@@ -2817,17 +2835,16 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj)
 }
 
 /** Flushes any GPU write domain for the object if it's dirty. */
-static void
+static int
 i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
 {
 	struct drm_device *dev = obj->base.dev;
 
 	if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
-		return;
+		return 0;
 
 	/* Queue the GPU write cache flushing we need. */
-	i915_gem_flush_ring(dev, obj->ring, 0, obj->base.write_domain);
-	BUG_ON(obj->base.write_domain);
+	return i915_gem_flush_ring(dev, obj->ring, 0, obj->base.write_domain);
 }
 
 /** Flushes the GTT write domain for the object if it's dirty. */
@@ -2894,7 +2911,10 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 	if (obj->gtt_space == NULL)
 		return -EINVAL;
 
-	i915_gem_object_flush_gpu_write_domain(obj);
+	ret = i915_gem_object_flush_gpu_write_domain(obj);
+	if (ret)
+		return ret;
+
 	if (obj->pending_gpu_write || write) {
 		ret = i915_gem_object_wait_rendering(obj, true);
 		if (ret)
@@ -2939,7 +2959,10 @@ i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
 	if (obj->gtt_space == NULL)
 		return -EINVAL;
 
-	i915_gem_object_flush_gpu_write_domain(obj);
+	ret = i915_gem_object_flush_gpu_write_domain(obj);
+	if (ret)
+		return ret;
+
 
 	/* Currently, we are always called from an non-interruptible context. */
 	if (pipelined != obj->ring) {
@@ -2964,12 +2987,17 @@ int
 i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
 			  bool interruptible)
 {
+	int ret;
+
 	if (!obj->active)
 		return 0;
 
-	if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
-		i915_gem_flush_ring(obj->base.dev, obj->ring,
-				    0, obj->base.write_domain);
+	if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
+		ret = i915_gem_flush_ring(obj->base.dev, obj->ring,
+					  0, obj->base.write_domain);
+		if (ret)
+			return ret;
+	}
 
 	return i915_gem_object_wait_rendering(obj, interruptible);
 }
@@ -2986,7 +3014,10 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
 	uint32_t old_write_domain, old_read_domains;
 	int ret;
 
-	i915_gem_object_flush_gpu_write_domain(obj);
+	ret = i915_gem_object_flush_gpu_write_domain(obj);
+	if (ret)
+		return ret;
+
 	ret = i915_gem_object_wait_rendering(obj, true);
 	if (ret)
 		return ret;
@@ -3081,7 +3112,10 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
 	if (offset == 0 && size == obj->base.size)
 		return i915_gem_object_set_to_cpu_domain(obj, 0);
 
-	i915_gem_object_flush_gpu_write_domain(obj);
+	ret = i915_gem_object_flush_gpu_write_domain(obj);
+	if (ret)
+		return ret;
+
 	ret = i915_gem_object_wait_rendering(obj, true);
 	if (ret)
 		return ret;
@@ -3374,8 +3408,8 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	 * flush earlier is beneficial.
 	 */
 	if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
-		i915_gem_flush_ring(dev, obj->ring,
-				    0, obj->base.write_domain);
+		ret = i915_gem_flush_ring(dev, obj->ring,
+					  0, obj->base.write_domain);
 	} else if (obj->ring->outstanding_lazy_request ==
 		   obj->last_rendering_seqno) {
 		struct drm_i915_gem_request *request;
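
The change is mechanical throughout: i915_gem_flush_ring() and i915_gem_object_flush_gpu_write_domain() now return int instead of void, the prototypes gain __must_check so a caller that ignores the result draws a compiler warning, and every call site grows the same capture-test-bail sequence. The BUG_ON(obj->base.write_domain) assertion is dropped along the way, since the flush can now legitimately fail and report the error instead of asserting. The sketch below is a minimal standalone illustration of the pattern, not kernel code; all names in it are hypothetical, and must_check stands in for the kernel's __must_check, which expands to GCC's warn_unused_result attribute.

	#include <stdio.h>

	/* Stand-in for the kernel's __must_check: ignoring the return
	 * value of a function marked this way is a compile-time warning. */
	#define must_check __attribute__((warn_unused_result))

	/* Hypothetical stand-in for ring->flush(): may fail, e.g. -EIO. */
	static int flush_hw(int fail)
	{
		return fail ? -5 /* -EIO */ : 0;
	}

	/* Formerly 'void flush_ring(...)', which swallowed the error;
	 * now failure short-circuits past the success-only bookkeeping. */
	static must_check int flush_ring(int fail)
	{
		int ret;

		ret = flush_hw(fail);
		if (ret)
			return ret;

		/* ...process the flushing list only on success... */
		return 0;
	}

	/* Every caller grows the same three lines: capture, test, bail. */
	static int set_domain(int fail)
	{
		int ret;

		ret = flush_ring(fail);
		if (ret)
			return ret;

		/* ...domain bookkeeping continues only on success... */
		return 0;
	}

	int main(void)
	{
		printf("success path: %d\n", set_domain(0));	/* prints 0 */
		printf("failure path: %d\n", set_domain(1));	/* prints -5 */
		return 0;
	}

With the attribute in place, a bare statement such as 'flush_ring(0);' would trigger -Wunused-result, which is the safety net the patch adds to the i915 prototypes.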