Diffstat (limited to 'drivers')
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.h            | 18
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c            | 58
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_execbuffer.c |  4
-rw-r--r--	drivers/gpu/drm/i915/intel_display.c       | 16
-rw-r--r--	drivers/gpu/drm/i915/intel_drv.h           |  3
-rw-r--r--	drivers/gpu/drm/i915/intel_overlay.c       | 32
6 files changed, 60 insertions, 71 deletions
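
This patch removes the "bool interruptible" parameter that was threaded through the GEM wait paths (i915_wait_request(), i915_gem_object_wait_rendering(), the fence and overlay helpers) and replaces it with a single dev_priv->mm.interruptible flag: true by default (set in i915_gem_load()) and cleared around non-interruptible sections such as modesetting. A minimal sketch of the resulting caller-side pattern; the wrapper function here is hypothetical, only the flag and the callee come from this patch:

	/* Hypothetical caller, for illustration only: GPU waits inside
	 * this bracket must not be aborted by a signal, so clear the
	 * device-wide flag instead of passing a bool down the stack. */
	static int example_modeset_step(struct drm_device *dev,
					struct drm_i915_gem_object *obj)
	{
		struct drm_i915_private *dev_priv = dev->dev_private;
		int ret;

		dev_priv->mm.interruptible = false;
		ret = i915_gem_object_wait_rendering(obj); /* no bool argument */
		dev_priv->mm.interruptible = true;

		return ret;
	}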
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index bd24861bcd95..6b86e83ae128 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -617,6 +617,12 @@ typedef struct drm_i915_private {
 		struct delayed_work retire_work;
 
 		/**
+		 * Are we in a non-interruptible section of code like
+		 * modesetting?
+		 */
+		bool interruptible;
+
+		/**
 		 * Flag if the X Server, and thus DRM, is not currently in
 		 * control of the device.
 		 *
@@ -1110,8 +1116,7 @@ void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
 void i915_gem_lastclose(struct drm_device *dev);
 
 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
-int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
-						bool interruptible);
+int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj);
 void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
 				    struct intel_ring_buffer *ring,
 				    u32 seqno);
@@ -1133,8 +1138,7 @@ i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
 }
 
 int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
-					   struct intel_ring_buffer *pipelined,
-					   bool interruptible);
+					   struct intel_ring_buffer *pipelined);
 int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
 
 void i915_gem_retire_requests(struct drm_device *dev);
@@ -1143,8 +1147,7 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
 int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
 					    uint32_t read_domains,
 					    uint32_t write_domain);
-int __must_check i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
-					   bool interruptible);
+int __must_check i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj);
 int __must_check i915_gem_init_ringbuffer(struct drm_device *dev);
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
 void i915_gem_do_init(struct drm_device *dev,
@@ -1157,8 +1160,7 @@ int __must_check i915_add_request(struct intel_ring_buffer *ring,
 				  struct drm_file *file,
 				  struct drm_i915_gem_request *request);
 int __must_check i915_wait_request(struct intel_ring_buffer *ring,
-				   uint32_t seqno,
-				   bool interruptible);
+				   uint32_t seqno);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int __must_check
 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index f5094bb82d32..ac23dcf084be 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1200,7 +1200,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	if (obj->tiling_mode == I915_TILING_NONE)
 		ret = i915_gem_object_put_fence(obj);
 	else
-		ret = i915_gem_object_get_fence(obj, NULL, true);
+		ret = i915_gem_object_get_fence(obj, NULL);
 	if (ret)
 		goto unlock;
 
@@ -1989,8 +1989,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
  */
 int
 i915_wait_request(struct intel_ring_buffer *ring,
-		  uint32_t seqno,
-		  bool interruptible)
+		  uint32_t seqno)
 {
 	drm_i915_private_t *dev_priv = ring->dev->dev_private;
 	u32 ier;
@@ -2043,7 +2042,7 @@ i915_wait_request(struct intel_ring_buffer *ring,
 
 	ring->waiting_seqno = seqno;
 	if (ring->irq_get(ring)) {
-		if (interruptible)
+		if (dev_priv->mm.interruptible)
 			ret = wait_event_interruptible(ring->irq_queue,
 						       i915_seqno_passed(ring->get_seqno(ring), seqno)
 						       || atomic_read(&dev_priv->mm.wedged));
@@ -2085,8 +2084,7 @@ i915_wait_request(struct intel_ring_buffer *ring,
  * safe to unbind from the GTT or access from the CPU.
  */
 int
-i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
-			       bool interruptible)
+i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
 {
 	int ret;
 
@@ -2099,9 +2097,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
 	 * it.
 	 */
 	if (obj->active) {
-		ret = i915_wait_request(obj->ring,
-					obj->last_rendering_seqno,
-					interruptible);
+		ret = i915_wait_request(obj->ring, obj->last_rendering_seqno);
 		if (ret)
 			return ret;
 	}
@@ -2202,9 +2198,7 @@ static int i915_ring_idle(struct intel_ring_buffer *ring)
 			return ret;
 	}
 
-	return i915_wait_request(ring,
-				 i915_gem_next_request_seqno(ring),
-				 true);
+	return i915_wait_request(ring, i915_gem_next_request_seqno(ring));
 }
 
 int
@@ -2405,8 +2399,7 @@ static bool ring_passed_seqno(struct intel_ring_buffer *ring, u32 seqno)
 
 static int
 i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
-			    struct intel_ring_buffer *pipelined,
-			    bool interruptible)
+			    struct intel_ring_buffer *pipelined)
 {
 	int ret;
 
@@ -2425,9 +2418,7 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
 		if (!ring_passed_seqno(obj->last_fenced_ring,
 				       obj->last_fenced_seqno)) {
 			ret = i915_wait_request(obj->last_fenced_ring,
-						obj->last_fenced_seqno,
-						interruptible);
-
+						obj->last_fenced_seqno);
 			if (ret)
 				return ret;
 		}
@@ -2453,7 +2444,7 @@ i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
 	if (obj->tiling_mode)
 		i915_gem_release_mmap(obj);
 
-	ret = i915_gem_object_flush_fence(obj, NULL, true);
+	ret = i915_gem_object_flush_fence(obj, NULL);
 	if (ret)
 		return ret;
 
@@ -2530,8 +2521,7 @@ i915_find_fence_reg(struct drm_device *dev,
  */
 int
 i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
-			  struct intel_ring_buffer *pipelined,
-			  bool interruptible)
+			  struct intel_ring_buffer *pipelined)
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2554,8 +2544,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
 			if (!ring_passed_seqno(obj->last_fenced_ring,
 					       reg->setup_seqno)) {
 				ret = i915_wait_request(obj->last_fenced_ring,
-							reg->setup_seqno,
-							interruptible);
+							reg->setup_seqno);
 				if (ret)
 					return ret;
 			}
@@ -2564,9 +2553,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
 		}
 	} else if (obj->last_fenced_ring &&
 		   obj->last_fenced_ring != pipelined) {
-		ret = i915_gem_object_flush_fence(obj,
-						  pipelined,
-						  interruptible);
+		ret = i915_gem_object_flush_fence(obj, pipelined);
 		if (ret)
 			return ret;
 	} else if (obj->tiling_changed) {
@@ -2603,7 +2590,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
 	if (reg == NULL)
 		return -ENOSPC;
 
-	ret = i915_gem_object_flush_fence(obj, pipelined, interruptible);
+	ret = i915_gem_object_flush_fence(obj, pipelined);
 	if (ret)
 		return ret;
 
@@ -2615,9 +2602,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
 		if (old->tiling_mode)
 			i915_gem_release_mmap(old);
 
-		ret = i915_gem_object_flush_fence(old,
-						  pipelined,
-						  interruptible);
+		ret = i915_gem_object_flush_fence(old, pipelined);
 		if (ret) {
 			drm_gem_object_unreference(&old->base);
 			return ret;
@@ -2940,7 +2925,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 		return ret;
 
 	if (obj->pending_gpu_write || write) {
-		ret = i915_gem_object_wait_rendering(obj, true);
+		ret = i915_gem_object_wait_rendering(obj);
 		if (ret)
 			return ret;
 	}
@@ -2990,7 +2975,7 @@ i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
 
 	/* Currently, we are always called from an non-interruptible context. */
 	if (pipelined != obj->ring) {
-		ret = i915_gem_object_wait_rendering(obj, false);
+		ret = i915_gem_object_wait_rendering(obj);
 		if (ret)
 			return ret;
 	}
@@ -3008,8 +2993,7 @@ i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
 }
 
 int
-i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
-			  bool interruptible)
+i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj)
 {
 	int ret;
 
@@ -3022,7 +3006,7 @@ i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
 			return ret;
 	}
 
-	return i915_gem_object_wait_rendering(obj, interruptible);
+	return i915_gem_object_wait_rendering(obj);
 }
 
 /**
@@ -3044,7 +3028,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
 	if (ret)
 		return ret;
 
-	ret = i915_gem_object_wait_rendering(obj, true);
+	ret = i915_gem_object_wait_rendering(obj);
 	if (ret)
 		return ret;
 
@@ -3142,7 +3126,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
 	if (ret)
 		return ret;
 
-	ret = i915_gem_object_wait_rendering(obj, true);
+	ret = i915_gem_object_wait_rendering(obj);
 	if (ret)
 		return ret;
 
@@ -3842,6 +3826,8 @@ i915_gem_load(struct drm_device *dev)
 	i915_gem_detect_bit_6_swizzle(dev);
 	init_waitqueue_head(&dev_priv->pending_flip_queue);
 
+	dev_priv->mm.interruptible = true;
+
 	dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
 	dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
 	register_shrinker(&dev_priv->mm.inactive_shrinker);
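
With the parameter gone, the choice between an interruptible and an uninterruptible sleep is made once, inside i915_wait_request(), from the device-wide flag. A simplified sketch of that dispatch, assuming the else branch mirrors the pre-existing wait_event() path (retirement and wedged-GPU handling omitted):

	if (dev_priv->mm.interruptible)
		ret = wait_event_interruptible(ring->irq_queue,
					       i915_seqno_passed(ring->get_seqno(ring), seqno)
					       || atomic_read(&dev_priv->mm.wedged));
	else
		/* assumed unchanged from the original code path */
		wait_event(ring->irq_queue,
			   i915_seqno_passed(ring->get_seqno(ring), seqno)
			   || atomic_read(&dev_priv->mm.wedged));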
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index f0c93b2e6c68..71a4a3b69158 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -560,7 +560,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 
 		if (has_fenced_gpu_access) {
 			if (need_fence) {
-				ret = i915_gem_object_get_fence(obj, ring, 1);
+				ret = i915_gem_object_get_fence(obj, ring);
 				if (ret)
 					break;
 			} else if (entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
@@ -756,7 +756,7 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
 
 	/* XXX gpu semaphores are currently causing hard hangs on SNB mobile */
 	if (INTEL_INFO(obj->base.dev)->gen < 6 || IS_MOBILE(obj->base.dev))
-		return i915_gem_object_wait_rendering(obj, true);
+		return i915_gem_object_wait_rendering(obj);
 
 	idx = intel_ring_sync_index(from, to);
 
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 40fcbc91139c..c19e974c0019 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2067,6 +2067,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
 			   struct drm_i915_gem_object *obj,
 			   struct intel_ring_buffer *pipelined)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 alignment;
 	int ret;
 
@@ -2091,9 +2092,10 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
 		BUG();
 	}
 
+	dev_priv->mm.interruptible = false;
 	ret = i915_gem_object_pin(obj, alignment, true);
 	if (ret)
-		return ret;
+		goto err_interruptible;
 
 	ret = i915_gem_object_set_to_display_plane(obj, pipelined);
 	if (ret)
@@ -2105,15 +2107,18 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
 	 * a fence as the cost is not that onerous.
 	 */
 	if (obj->tiling_mode != I915_TILING_NONE) {
-		ret = i915_gem_object_get_fence(obj, pipelined, false);
+		ret = i915_gem_object_get_fence(obj, pipelined);
 		if (ret)
 			goto err_unpin;
 	}
 
+	dev_priv->mm.interruptible = true;
 	return 0;
 
 err_unpin:
 	i915_gem_object_unpin(obj);
+err_interruptible:
+	dev_priv->mm.interruptible = true;
 	return ret;
 }
 
@@ -2247,7 +2252,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 		 * This should only fail upon a hung GPU, in which case we
 		 * can safely continue.
 		 */
-		ret = i915_gem_object_flush_gpu(obj, false);
+		ret = i915_gem_object_flush_gpu(obj);
 		(void) ret;
 	}
 
@@ -2994,9 +2999,12 @@ static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
 {
 	if (!enable && intel_crtc->overlay) {
 		struct drm_device *dev = intel_crtc->base.dev;
+		struct drm_i915_private *dev_priv = dev->dev_private;
 
 		mutex_lock(&dev->struct_mutex);
-		(void) intel_overlay_switch_off(intel_crtc->overlay, false);
+		dev_priv->mm.interruptible = false;
+		(void) intel_overlay_switch_off(intel_crtc->overlay);
+		dev_priv->mm.interruptible = true;
 		mutex_unlock(&dev->struct_mutex);
 	}
 
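
Note how intel_pin_and_fence_fb_obj() above restores the flag on every exit path: the new err_interruptible label ensures a failed pin cannot leave the device stuck in non-interruptible mode. Any caller that clears the flag should follow the same shape, sketched here with a hypothetical non-interruptible step:

	dev_priv->mm.interruptible = false;
	ret = do_uninterruptible_work(obj);	/* hypothetical step */
	dev_priv->mm.interruptible = true;	/* restore even on failure */
	if (ret)
		return ret;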
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index aae4806203db..08cd27d2c132 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -329,8 +329,7 @@ extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
 
 extern void intel_setup_overlay(struct drm_device *dev);
 extern void intel_cleanup_overlay(struct drm_device *dev);
-extern int intel_overlay_switch_off(struct intel_overlay *overlay,
-				    bool interruptible);
+extern int intel_overlay_switch_off(struct intel_overlay *overlay);
 extern int intel_overlay_put_image(struct drm_device *dev, void *data,
 				   struct drm_file *file_priv);
 extern int intel_overlay_attrs(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 50bc865139aa..a670c006982e 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -213,7 +213,6 @@ static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
 
 static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
 					 struct drm_i915_gem_request *request,
-					 bool interruptible,
 					 void (*tail)(struct intel_overlay *))
 {
 	struct drm_device *dev = overlay->dev;
@@ -228,8 +227,7 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
 	}
 	overlay->last_flip_req = request->seqno;
 	overlay->flip_tail = tail;
-	ret = i915_wait_request(LP_RING(dev_priv),
-				overlay->last_flip_req, true);
+	ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req);
 	if (ret)
 		return ret;
 
@@ -321,7 +319,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 	OUT_RING(MI_NOOP);
 	ADVANCE_LP_RING();
 
-	ret = intel_overlay_do_wait_request(overlay, request, true, NULL);
+	ret = intel_overlay_do_wait_request(overlay, request, NULL);
 out:
 	if (pipe_a_quirk)
 		i830_deactivate_pipe_a(dev);
@@ -400,8 +398,7 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay)
 }
 
 /* overlay needs to be disabled in OCMD reg */
-static int intel_overlay_off(struct intel_overlay *overlay,
-			     bool interruptible)
+static int intel_overlay_off(struct intel_overlay *overlay)
 {
 	struct drm_device *dev = overlay->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -436,14 +433,13 @@ static int intel_overlay_off(struct intel_overlay *overlay,
 	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
 	ADVANCE_LP_RING();
 
-	return intel_overlay_do_wait_request(overlay, request, interruptible,
+	return intel_overlay_do_wait_request(overlay, request,
 					     intel_overlay_off_tail);
 }
 
 /* recover from an interruption due to a signal
  * We have to be careful not to repeat work forever an make forward progess. */
-static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
-						bool interruptible)
+static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
 {
 	struct drm_device *dev = overlay->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
@@ -452,8 +448,7 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
 	if (overlay->last_flip_req == 0)
 		return 0;
 
-	ret = i915_wait_request(LP_RING(dev_priv),
-				overlay->last_flip_req, interruptible);
+	ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req);
 	if (ret)
 		return ret;
 
@@ -498,7 +493,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 	OUT_RING(MI_NOOP);
 	ADVANCE_LP_RING();
 
-	ret = intel_overlay_do_wait_request(overlay, request, true,
+	ret = intel_overlay_do_wait_request(overlay, request,
 					    intel_overlay_release_old_vid_tail);
 	if (ret)
 		return ret;
@@ -867,8 +862,7 @@ out_unpin:
 	return ret;
 }
 
-int intel_overlay_switch_off(struct intel_overlay *overlay,
-			     bool interruptible)
+int intel_overlay_switch_off(struct intel_overlay *overlay)
 {
 	struct overlay_registers *regs;
 	struct drm_device *dev = overlay->dev;
@@ -877,7 +871,7 @@ int intel_overlay_switch_off(struct intel_overlay *overlay,
 	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
 	BUG_ON(!mutex_is_locked(&dev->mode_config.mutex));
 
-	ret = intel_overlay_recover_from_interrupt(overlay, interruptible);
+	ret = intel_overlay_recover_from_interrupt(overlay);
 	if (ret != 0)
 		return ret;
 
@@ -892,7 +886,7 @@ int intel_overlay_switch_off(struct intel_overlay *overlay,
 	regs->OCMD = 0;
 	intel_overlay_unmap_regs(overlay, regs);
 
-	ret = intel_overlay_off(overlay, interruptible);
+	ret = intel_overlay_off(overlay);
 	if (ret != 0)
 		return ret;
 
@@ -1134,7 +1128,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
 		mutex_lock(&dev->mode_config.mutex);
 		mutex_lock(&dev->struct_mutex);
 
-		ret = intel_overlay_switch_off(overlay, true);
+		ret = intel_overlay_switch_off(overlay);
 
 		mutex_unlock(&dev->struct_mutex);
 		mutex_unlock(&dev->mode_config.mutex);
@@ -1170,13 +1164,13 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
 		goto out_unlock;
 	}
 
-	ret = intel_overlay_recover_from_interrupt(overlay, true);
+	ret = intel_overlay_recover_from_interrupt(overlay);
 	if (ret != 0)
 		goto out_unlock;
 
 	if (overlay->crtc != crtc) {
 		struct drm_display_mode *mode = &crtc->base.mode;
-		ret = intel_overlay_switch_off(overlay, true);
+		ret = intel_overlay_switch_off(overlay);
 		if (ret != 0)
 			goto out_unlock;
 