author		Chris Wilson <chris@chris-wilson.co.uk>	2012-11-01 05:26:26 -0400
committer	Daniel Vetter <daniel.vetter@ffwll.ch>	2012-11-21 11:45:06 -0500
commit		b4a98e57fc27854b5938fc8b08b68e5e68b91e1f (patch)
tree		61db63ff4f2e3d7beabef9ef1a50079038e2467e /drivers/gpu/drm/i915/intel_display.c
parent		a726915cef1daab57aad4c5b5e4773822f0a4bf8 (diff)
drm/i915: Flush outstanding unpin tasks before pageflipping
If we accumulate unpin tasks because we are pageflipping faster than the
system can schedule its workers, we can effectively create a pin-leak.
The solution taken here is to limit the number of unpin tasks we have
per-crtc and to flush those outstanding tasks if we accumulate too many.
This should prevent any jitter in the normal case, and also prevent the
hang if we should run too fast.

Note: It is important that we switch from the system workqueue to our own
dev_priv->wq, since all work items on that queue are guaranteed to need
only dev->struct_mutex and no modeset resources. Otherwise, if a work item
ahead of us in the queue needs the modeset lock (like the output detect
work used by polling or hpd), that work, and hence the unpin work behind
it, would never execute, since the pageflip code already holds that lock.
Unfortunately there's no lockdep support for this scenario in the
workqueue code.

Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=46991
Reported-and-tested-by: Tvrtko Ursulin <tvrtko.ursulin@onelan.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
[danvet: Added note about workqueue deadlock.]
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=56337
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
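As an illustration of the throttling scheme the message describes, here is a
minimal, hypothetical sketch (not the driver code itself; the real change is in
the diff below). The names example_crtc, example_queue_flip, example_finish_flip
and UNPIN_WORK_LIMIT are invented for the example: the flip path keeps a
per-CRTC count of outstanding unpin work items, flushes the driver's own
workqueue once that count reaches the limit, and hands the unpin work to
dev_priv->wq instead of the system workqueue.

/*
 * Minimal sketch of the throttling pattern described above. All names here
 * are hypothetical; the actual change is shown in the diff below.
 */
#include <linux/workqueue.h>
#include <linux/atomic.h>

#define UNPIN_WORK_LIMIT 2		/* limit used by the patch */

struct example_crtc {
	atomic_t unpin_work_count;	/* outstanding unpin work items */
};

/* Flip ioctl path: throttle before committing to another flip. */
static void example_queue_flip(struct example_crtc *crtc,
			       struct workqueue_struct *driver_wq)
{
	/* Drain stale unpin work before taking dev->struct_mutex. */
	if (atomic_read(&crtc->unpin_work_count) >= UNPIN_WORK_LIMIT)
		flush_workqueue(driver_wq);

	atomic_inc(&crtc->unpin_work_count);
	/* ... pin the new framebuffer and queue the flip to hardware ... */
}

/* Flip-complete path: hand the unpin off to the driver's own workqueue. */
static void example_finish_flip(struct workqueue_struct *driver_wq,
				struct work_struct *unpin_work)
{
	/*
	 * Use the driver workqueue, not schedule_work(): items on it only
	 * ever need dev->struct_mutex, so the flush above cannot deadlock
	 * against work that wants modeset locks.
	 */
	queue_work(driver_wq, unpin_work);
}

/* The work function itself decrements unpin_work_count after unpinning. */

A limit of two keeps the common double-buffered case free of extra stalls while
still bounding the number of leaked pins if the workers fall behind.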
Diffstat (limited to 'drivers/gpu/drm/i915/intel_display.c')
-rw-r--r--	drivers/gpu/drm/i915/intel_display.c	22
1 file changed, 16 insertions(+), 6 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3e41f46cf222..797953376954 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -6896,14 +6896,19 @@ static void intel_unpin_work_fn(struct work_struct *__work)
 {
 	struct intel_unpin_work *work =
 		container_of(__work, struct intel_unpin_work, work);
+	struct drm_device *dev = work->crtc->dev;
 
-	mutex_lock(&work->dev->struct_mutex);
+	mutex_lock(&dev->struct_mutex);
 	intel_unpin_fb_obj(work->old_fb_obj);
 	drm_gem_object_unreference(&work->pending_flip_obj->base);
 	drm_gem_object_unreference(&work->old_fb_obj->base);
 
-	intel_update_fbc(work->dev);
-	mutex_unlock(&work->dev->struct_mutex);
+	intel_update_fbc(dev);
+	mutex_unlock(&dev->struct_mutex);
+
+	BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
+	atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);
+
 	kfree(work);
 }
 
@@ -6951,9 +6956,9 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
 
 	atomic_clear_mask(1 << intel_crtc->plane,
 			  &obj->pending_flip.counter);
-
 	wake_up(&dev_priv->pending_flip_queue);
-	schedule_work(&work->work);
+
+	queue_work(dev_priv->wq, &work->work);
 
 	trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
 }
@@ -7254,7 +7259,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 		return -ENOMEM;
 
 	work->event = event;
-	work->dev = crtc->dev;
+	work->crtc = crtc;
 	intel_fb = to_intel_framebuffer(crtc->fb);
 	work->old_fb_obj = intel_fb->obj;
 	INIT_WORK(&work->work, intel_unpin_work_fn);
@@ -7279,6 +7284,9 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	intel_fb = to_intel_framebuffer(fb);
 	obj = intel_fb->obj;
 
+	if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
+		flush_workqueue(dev_priv->wq);
+
 	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
 		goto cleanup;
@@ -7297,6 +7305,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	 * the flip occurs and the object is no longer visible.
 	 */
 	atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
+	atomic_inc(&intel_crtc->unpin_work_count);
 
 	ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
 	if (ret)
@@ -7311,6 +7320,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	return 0;
 
 cleanup_pending:
+	atomic_dec(&intel_crtc->unpin_work_count);
 	atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
 	drm_gem_object_unreference(&work->old_fb_obj->base);
 	drm_gem_object_unreference(&obj->base);