Diffstat (limited to 'drivers/gpu/drm/i915/i915_irq.c')
 drivers/gpu/drm/i915/i915_irq.c | 138 ++++++++++++++++++++++++---------------
 1 file changed, 85 insertions(+), 53 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index ca55c40353a5..862375694431 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -121,6 +121,9 @@ i915_pipe_enabled(struct drm_device *dev, int pipe)
  * Emit blits for scheduled buffer swaps.
  *
  * This function will be called with the HW lock held.
+ * Because this function must grab the ring mutex (dev->struct_mutex),
+ * it can no longer run at soft irq time. We'll fix this when we do
+ * the DRI2 swap buffer work.
  */
 static void i915_vblank_tasklet(struct drm_device *dev)
 {
@@ -141,6 +144,8 @@ static void i915_vblank_tasklet(struct drm_device *dev)
	u32 ropcpp = (0xcc << 16) | ((cpp - 1) << 24);
	RING_LOCALS;
 
+	mutex_lock(&dev->struct_mutex);
+
	if (IS_I965G(dev) && sarea_priv->front_tiled) {
		cmd |= XY_SRC_COPY_BLT_DST_TILED;
		dst_pitch >>= 2;
@@ -150,8 +155,8 @@ static void i915_vblank_tasklet(struct drm_device *dev)
		src_pitch >>= 2;
	}
 
-	counter[0] = drm_vblank_count(dev, 0);
-	counter[1] = drm_vblank_count(dev, 1);
+	counter[0] = drm_vblank_count(dev, i915_get_plane(dev, 0));
+	counter[1] = drm_vblank_count(dev, i915_get_plane(dev, 1));
 
	DRM_DEBUG("\n");
 
@@ -165,7 +170,7 @@ static void i915_vblank_tasklet(struct drm_device *dev)
	list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) {
		drm_i915_vbl_swap_t *vbl_swap =
			list_entry(list, drm_i915_vbl_swap_t, head);
-		int pipe = i915_get_pipe(dev, vbl_swap->plane);
+		int pipe = vbl_swap->pipe;
 
		if ((counter[pipe] - vbl_swap->sequence) > (1<<23))
			continue;
@@ -179,20 +184,19 @@ static void i915_vblank_tasklet(struct drm_device *dev)
 
		drw = drm_get_drawable_info(dev, vbl_swap->drw_id);
 
-		if (!drw) {
-			spin_unlock(&dev->drw_lock);
-			drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
-			spin_lock(&dev_priv->swaps_lock);
-			continue;
-		}
-
		list_for_each(hit, &hits) {
			drm_i915_vbl_swap_t *swap_cmp =
				list_entry(hit, drm_i915_vbl_swap_t, head);
			struct drm_drawable_info *drw_cmp =
				drm_get_drawable_info(dev, swap_cmp->drw_id);
 
-			if (drw_cmp &&
+			/* Make sure both drawables are still
+			 * around and have some rectangles before
+			 * we look inside to order them for the
+			 * blts below.
+			 */
+			if (drw_cmp && drw_cmp->num_rects > 0 &&
+			    drw && drw->num_rects > 0 &&
			    drw_cmp->rects[0].y1 > drw->rects[0].y1) {
				list_add_tail(list, hit);
				break;
@@ -212,6 +216,7 @@ static void i915_vblank_tasklet(struct drm_device *dev)
 
	if (nhits == 0) {
		spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
+		mutex_unlock(&dev->struct_mutex);
		return;
	}
 
@@ -265,18 +270,21 @@ static void i915_vblank_tasklet(struct drm_device *dev)
		drm_i915_vbl_swap_t *swap_hit =
			list_entry(hit, drm_i915_vbl_swap_t, head);
		struct drm_clip_rect *rect;
-		int num_rects, plane;
+		int num_rects, pipe;
		unsigned short top, bottom;
 
		drw = drm_get_drawable_info(dev, swap_hit->drw_id);
 
+		/* The drawable may have been destroyed since
+		 * the vblank swap was queued
+		 */
		if (!drw)
			continue;
 
		rect = drw->rects;
-		plane = swap_hit->plane;
-		top = upper[plane];
-		bottom = lower[plane];
+		pipe = swap_hit->pipe;
+		top = upper[pipe];
+		bottom = lower[pipe];
 
		for (num_rects = drw->num_rects; num_rects--; rect++) {
			int y1 = max(rect->y1, top);
@@ -302,6 +310,7 @@ static void i915_vblank_tasklet(struct drm_device *dev)
	}
 
	spin_unlock_irqrestore(&dev->drw_lock, irqflags);
+	mutex_unlock(&dev->struct_mutex);
 
	list_for_each_safe(hit, tmp, &hits) {
		drm_i915_vbl_swap_t *swap_hit =
@@ -350,18 +359,37 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int plane)
 }
 
 void
-i915_gem_vblank_work_handler(struct work_struct *work)
+i915_vblank_work_handler(struct work_struct *work)
 {
-	drm_i915_private_t *dev_priv;
-	struct drm_device *dev;
+	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
+						    vblank_work);
+	struct drm_device *dev = dev_priv->dev;
+	unsigned long irqflags;
+
+	if (dev->lock.hw_lock == NULL) {
+		i915_vblank_tasklet(dev);
+		return;
+	}
 
-	dev_priv = container_of(work, drm_i915_private_t,
-				mm.vblank_work);
-	dev = dev_priv->dev;
+	spin_lock_irqsave(&dev->tasklet_lock, irqflags);
+	dev->locked_tasklet_func = i915_vblank_tasklet;
+	spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
+
+	/* Try to get the lock now, if this fails, the lock
+	 * holder will execute the tasklet during unlock
+	 */
+	if (!drm_lock_take(&dev->lock, DRM_KERNEL_CONTEXT))
+		return;
+
+	dev->lock.lock_time = jiffies;
+	atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
+
+	spin_lock_irqsave(&dev->tasklet_lock, irqflags);
+	dev->locked_tasklet_func = NULL;
+	spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
 
-	mutex_lock(&dev->struct_mutex);
	i915_vblank_tasklet(dev);
-	mutex_unlock(&dev->struct_mutex);
+	drm_lock_free(&dev->lock, DRM_KERNEL_CONTEXT);
 }
 
 irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
@@ -441,12 +469,8 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
	if (iir & I915_ASLE_INTERRUPT)
		opregion_asle_intr(dev);
 
-	if (vblank && dev_priv->swaps_pending > 0) {
-		if (dev_priv->ring.ring_obj == NULL)
-			drm_locked_tasklet(dev, i915_vblank_tasklet);
-		else
-			schedule_work(&dev_priv->mm.vblank_work);
-	}
+	if (vblank && dev_priv->swaps_pending > 0)
+		schedule_work(&dev_priv->vblank_work);
 
	return IRQ_HANDLED;
 }
@@ -706,7 +730,7 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
 {
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_vblank_swap_t *swap = data;
-	drm_i915_vbl_swap_t *vbl_swap;
+	drm_i915_vbl_swap_t *vbl_swap, *vbl_old;
	unsigned int pipe, seqtype, curseq, plane;
	unsigned long irqflags;
	struct list_head *list;
@@ -770,45 +794,52 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
		}
	}
 
+	vbl_swap = drm_calloc(1, sizeof(*vbl_swap), DRM_MEM_DRIVER);
+
+	if (!vbl_swap) {
+		DRM_ERROR("Failed to allocate memory to queue swap\n");
+		drm_vblank_put(dev, pipe);
+		return -ENOMEM;
+	}
+
+	vbl_swap->drw_id = swap->drawable;
+	vbl_swap->pipe = pipe;
+	vbl_swap->sequence = swap->sequence;
+
	spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
 
	list_for_each(list, &dev_priv->vbl_swaps.head) {
-		vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head);
+		vbl_old = list_entry(list, drm_i915_vbl_swap_t, head);
 
-		if (vbl_swap->drw_id == swap->drawable &&
-		    vbl_swap->plane == plane &&
-		    vbl_swap->sequence == swap->sequence) {
+		if (vbl_old->drw_id == swap->drawable &&
+		    vbl_old->pipe == pipe &&
+		    vbl_old->sequence == swap->sequence) {
			spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
			drm_vblank_put(dev, pipe);
+			drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
			DRM_DEBUG("Already scheduled\n");
			return 0;
		}
	}
 
-	spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
-
-	if (dev_priv->swaps_pending >= 100) {
+	if (dev_priv->swaps_pending >= 10) {
		DRM_DEBUG("Too many swaps queued\n");
+		DRM_DEBUG(" pipe 0: %d pipe 1: %d\n",
+			  drm_vblank_count(dev, i915_get_plane(dev, 0)),
+			  drm_vblank_count(dev, i915_get_plane(dev, 1)));
+
+		list_for_each(list, &dev_priv->vbl_swaps.head) {
+			vbl_old = list_entry(list, drm_i915_vbl_swap_t, head);
+			DRM_DEBUG("\tdrw %x pipe %d seq %x\n",
+				  vbl_old->drw_id, vbl_old->pipe,
+				  vbl_old->sequence);
+		}
+		spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
		drm_vblank_put(dev, pipe);
+		drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
		return -EBUSY;
	}
 
-	vbl_swap = drm_calloc(1, sizeof(*vbl_swap), DRM_MEM_DRIVER);
-
-	if (!vbl_swap) {
-		DRM_ERROR("Failed to allocate memory to queue swap\n");
-		drm_vblank_put(dev, pipe);
-		return -ENOMEM;
-	}
-
-	DRM_DEBUG("\n");
-
-	vbl_swap->drw_id = swap->drawable;
-	vbl_swap->plane = plane;
-	vbl_swap->sequence = swap->sequence;
-
-	spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
-
	list_add_tail(&vbl_swap->head, &dev_priv->vbl_swaps.head);
	dev_priv->swaps_pending++;
 
@@ -835,6 +866,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
 
	spin_lock_init(&dev_priv->swaps_lock);
	INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
+	INIT_WORK(&dev_priv->vblank_work, i915_vblank_work_handler);
	dev_priv->swaps_pending = 0;
 
	/* Set initial unmasked IRQs to just the selected vblank pipes. */