Diffstat (limited to 'drivers/gpu/drm')
 drivers/gpu/drm/i915/i915_drv.h |  15 +-
 drivers/gpu/drm/i915/i915_irq.c | 377 +----------------------------------
 2 files changed, 14 insertions(+), 378 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index be56b0ba88c4..4afbadb13316 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -88,13 +88,6 @@ struct mem_block {
 	struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
 };
 
-typedef struct _drm_i915_vbl_swap {
-	struct list_head head;
-	drm_drawable_t drw_id;
-	unsigned int pipe;
-	unsigned int sequence;
-} drm_i915_vbl_swap_t;
-
 struct opregion_header;
 struct opregion_acpi;
 struct opregion_swsci;
@@ -146,10 +139,6 @@ typedef struct drm_i915_private {
 	unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
 	int vblank_pipe;
 
-	spinlock_t swaps_lock;
-	drm_i915_vbl_swap_t vbl_swaps;
-	unsigned int swaps_pending;
-
 	struct intel_opregion opregion;
 
 	/* Register state */
@@ -242,9 +231,6 @@ typedef struct drm_i915_private {
 	u8 saveDACDATA[256*3]; /* 256 3-byte colors */
 	u8 saveCR[37];
 
-	/** Work task for vblank-related ring access */
-	struct work_struct vblank_work;
-
 	struct {
 		struct drm_mm gtt_space;
 
@@ -445,7 +431,6 @@ extern int i915_irq_wait(struct drm_device *dev, void *data,
 void i915_user_irq_get(struct drm_device *dev);
 void i915_user_irq_put(struct drm_device *dev);
 
-extern void i915_vblank_work_handler(struct work_struct *work);
 extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
 extern void i915_driver_irq_preinstall(struct drm_device * dev);
 extern int i915_driver_irq_postinstall(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 26f48932a51e..a75345af62ef 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -80,211 +80,6 @@ i915_pipe_enabled(struct drm_device *dev, int pipe)
 	return 0;
 }
 
-/**
- * Emit blits for scheduled buffer swaps.
- *
- * This function will be called with the HW lock held.
- * Because this function must grab the ring mutex (dev->struct_mutex),
- * it can no longer run at soft irq time.  We'll fix this when we do
- * the DRI2 swap buffer work.
- */
-static void i915_vblank_tasklet(struct drm_device *dev)
-{
-	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	unsigned long irqflags;
-	struct list_head *list, *tmp, hits, *hit;
-	int nhits, nrects, slice[2], upper[2], lower[2], i;
-	unsigned counter[2];
-	struct drm_drawable_info *drw;
-	drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
-	u32 cpp = dev_priv->cpp;
-	u32 cmd = (cpp == 4) ? (XY_SRC_COPY_BLT_CMD |
-				XY_SRC_COPY_BLT_WRITE_ALPHA |
-				XY_SRC_COPY_BLT_WRITE_RGB)
-			     : XY_SRC_COPY_BLT_CMD;
-	u32 src_pitch = sarea_priv->pitch * cpp;
-	u32 dst_pitch = sarea_priv->pitch * cpp;
-	u32 ropcpp = (0xcc << 16) | ((cpp - 1) << 24);
-	RING_LOCALS;
-
-	mutex_lock(&dev->struct_mutex);
-
-	if (IS_I965G(dev) && sarea_priv->front_tiled) {
-		cmd |= XY_SRC_COPY_BLT_DST_TILED;
-		dst_pitch >>= 2;
-	}
-	if (IS_I965G(dev) && sarea_priv->back_tiled) {
-		cmd |= XY_SRC_COPY_BLT_SRC_TILED;
-		src_pitch >>= 2;
-	}
-
-	counter[0] = drm_vblank_count(dev, 0);
-	counter[1] = drm_vblank_count(dev, 1);
-
-	DRM_DEBUG("\n");
-
-	INIT_LIST_HEAD(&hits);
-
-	nhits = nrects = 0;
-
-	spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
-
-	/* Find buffer swaps scheduled for this vertical blank */
-	list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) {
-		drm_i915_vbl_swap_t *vbl_swap =
-			list_entry(list, drm_i915_vbl_swap_t, head);
-		int pipe = vbl_swap->pipe;
-
-		if ((counter[pipe] - vbl_swap->sequence) > (1<<23))
-			continue;
-
-		list_del(list);
-		dev_priv->swaps_pending--;
-		drm_vblank_put(dev, pipe);
-
-		spin_unlock(&dev_priv->swaps_lock);
-		spin_lock(&dev->drw_lock);
-
-		drw = drm_get_drawable_info(dev, vbl_swap->drw_id);
-
-		list_for_each(hit, &hits) {
-			drm_i915_vbl_swap_t *swap_cmp =
-				list_entry(hit, drm_i915_vbl_swap_t, head);
-			struct drm_drawable_info *drw_cmp =
-				drm_get_drawable_info(dev, swap_cmp->drw_id);
-
-			/* Make sure both drawables are still
-			 * around and have some rectangles before
-			 * we look inside to order them for the
-			 * blts below.
-			 */
-			if (drw_cmp && drw_cmp->num_rects > 0 &&
-			    drw && drw->num_rects > 0 &&
-			    drw_cmp->rects[0].y1 > drw->rects[0].y1) {
-				list_add_tail(list, hit);
-				break;
-			}
-		}
-
-		spin_unlock(&dev->drw_lock);
-
-		/* List of hits was empty, or we reached the end of it */
-		if (hit == &hits)
-			list_add_tail(list, hits.prev);
-
-		nhits++;
-
-		spin_lock(&dev_priv->swaps_lock);
-	}
-
-	if (nhits == 0) {
-		spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
-		mutex_unlock(&dev->struct_mutex);
-		return;
-	}
-
-	spin_unlock(&dev_priv->swaps_lock);
-
-	i915_kernel_lost_context(dev);
-
-	if (IS_I965G(dev)) {
-		BEGIN_LP_RING(4);
-
-		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
-		OUT_RING(0);
-		OUT_RING(((sarea_priv->width - 1) & 0xffff) | ((sarea_priv->height - 1) << 16));
-		OUT_RING(0);
-		ADVANCE_LP_RING();
-	} else {
-		BEGIN_LP_RING(6);
-
-		OUT_RING(GFX_OP_DRAWRECT_INFO);
-		OUT_RING(0);
-		OUT_RING(0);
-		OUT_RING(sarea_priv->width | sarea_priv->height << 16);
-		OUT_RING(sarea_priv->width | sarea_priv->height << 16);
-		OUT_RING(0);
-
-		ADVANCE_LP_RING();
-	}
-
-	sarea_priv->ctxOwner = DRM_KERNEL_CONTEXT;
-
-	upper[0] = upper[1] = 0;
-	slice[0] = max(sarea_priv->pipeA_h / nhits, 1);
-	slice[1] = max(sarea_priv->pipeB_h / nhits, 1);
-	lower[0] = sarea_priv->pipeA_y + slice[0];
-	lower[1] = sarea_priv->pipeB_y + slice[0];
-
-	spin_lock(&dev->drw_lock);
-
-	/* Emit blits for buffer swaps, partitioning both outputs into as many
-	 * slices as there are buffer swaps scheduled in order to avoid tearing
-	 * (based on the assumption that a single buffer swap would always
-	 * complete before scanout starts).
-	 */
-	for (i = 0; i++ < nhits;
-	     upper[0] = lower[0], lower[0] += slice[0],
-	     upper[1] = lower[1], lower[1] += slice[1]) {
-		if (i == nhits)
-			lower[0] = lower[1] = sarea_priv->height;
-
-		list_for_each(hit, &hits) {
-			drm_i915_vbl_swap_t *swap_hit =
-				list_entry(hit, drm_i915_vbl_swap_t, head);
-			struct drm_clip_rect *rect;
-			int num_rects, pipe;
-			unsigned short top, bottom;
-
-			drw = drm_get_drawable_info(dev, swap_hit->drw_id);
-
-			/* The drawable may have been destroyed since
-			 * the vblank swap was queued
-			 */
-			if (!drw)
-				continue;
-
-			rect = drw->rects;
-			pipe = swap_hit->pipe;
-			top = upper[pipe];
-			bottom = lower[pipe];
-
-			for (num_rects = drw->num_rects; num_rects--; rect++) {
-				int y1 = max(rect->y1, top);
-				int y2 = min(rect->y2, bottom);
-
-				if (y1 >= y2)
-					continue;
-
-				BEGIN_LP_RING(8);
-
-				OUT_RING(cmd);
-				OUT_RING(ropcpp | dst_pitch);
-				OUT_RING((y1 << 16) | rect->x1);
-				OUT_RING((y2 << 16) | rect->x2);
-				OUT_RING(sarea_priv->front_offset);
-				OUT_RING((y1 << 16) | rect->x1);
-				OUT_RING(src_pitch);
-				OUT_RING(sarea_priv->back_offset);
-
-				ADVANCE_LP_RING();
-			}
-		}
-	}
-
-	spin_unlock_irqrestore(&dev->drw_lock, irqflags);
-	mutex_unlock(&dev->struct_mutex);
-
-	list_for_each_safe(hit, tmp, &hits) {
-		drm_i915_vbl_swap_t *swap_hit =
-			list_entry(hit, drm_i915_vbl_swap_t, head);
-
-		list_del(hit);
-
-		drm_free(swap_hit, sizeof(*swap_hit), DRM_MEM_DRIVER);
-	}
-}
-
 /* Called from drm generic code, passed a 'crtc', which
  * we use as a pipe index
  */
@@ -322,40 +117,6 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
 	return count;
 }
 
-void
-i915_vblank_work_handler(struct work_struct *work)
-{
-	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
-						    vblank_work);
-	struct drm_device *dev = dev_priv->dev;
-	unsigned long irqflags;
-
-	if (dev->lock.hw_lock == NULL) {
-		i915_vblank_tasklet(dev);
-		return;
-	}
-
-	spin_lock_irqsave(&dev->tasklet_lock, irqflags);
-	dev->locked_tasklet_func = i915_vblank_tasklet;
-	spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
-
-	/* Try to get the lock now, if this fails, the lock
-	 * holder will execute the tasklet during unlock
-	 */
-	if (!drm_lock_take(&dev->lock, DRM_KERNEL_CONTEXT))
-		return;
-
-	dev->lock.lock_time = jiffies;
-	atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
-
-	spin_lock_irqsave(&dev->tasklet_lock, irqflags);
-	dev->locked_tasklet_func = NULL;
-	spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
-
-	i915_vblank_tasklet(dev);
-	drm_lock_free(&dev->lock, DRM_KERNEL_CONTEXT);
-}
-
 irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 {
 	struct drm_device *dev = (struct drm_device *) arg;
@@ -433,9 +194,6 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 	if (iir & I915_ASLE_INTERRUPT)
 		opregion_asle_intr(dev);
 
-	if (vblank && dev_priv->swaps_pending > 0)
-		schedule_work(&dev_priv->vblank_work);
-
 	return IRQ_HANDLED;
 }
 
@@ -696,123 +454,21 @@ int i915_vblank_pipe_get(struct drm_device *dev, void *data,
 int i915_vblank_swap(struct drm_device *dev, void *data,
 		     struct drm_file *file_priv)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	drm_i915_vblank_swap_t *swap = data;
-	drm_i915_vbl_swap_t *vbl_swap, *vbl_old;
-	unsigned int pipe, seqtype, curseq;
-	unsigned long irqflags;
-	struct list_head *list;
-	int ret;
-
-	if (!dev_priv || !dev_priv->sarea_priv) {
-		DRM_ERROR("%s called with no initialization\n", __func__);
-		return -EINVAL;
-	}
-
-	if (dev_priv->sarea_priv->rotation) {
-		DRM_DEBUG("Rotation not supported\n");
-		return -EINVAL;
-	}
-
-	if (swap->seqtype & ~(_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE |
-			     _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)) {
-		DRM_ERROR("Invalid sequence type 0x%x\n", swap->seqtype);
-		return -EINVAL;
-	}
-
-	pipe = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0;
-
-	seqtype = swap->seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE);
-
-	if (!(dev_priv->vblank_pipe & (1 << pipe))) {
-		DRM_ERROR("Invalid pipe %d\n", pipe);
-		return -EINVAL;
-	}
-
-	spin_lock_irqsave(&dev->drw_lock, irqflags);
-
-	if (!drm_get_drawable_info(dev, swap->drawable)) {
-		spin_unlock_irqrestore(&dev->drw_lock, irqflags);
-		DRM_DEBUG("Invalid drawable ID %d\n", swap->drawable);
-		return -EINVAL;
-	}
-
-	spin_unlock_irqrestore(&dev->drw_lock, irqflags);
-
-	/*
-	 * We take the ref here and put it when the swap actually completes
-	 * in the tasklet.
-	 */
-	ret = drm_vblank_get(dev, pipe);
-	if (ret)
-		return ret;
-	curseq = drm_vblank_count(dev, pipe);
-
-	if (seqtype == _DRM_VBLANK_RELATIVE)
-		swap->sequence += curseq;
-
-	if ((curseq - swap->sequence) <= (1<<23)) {
-		if (swap->seqtype & _DRM_VBLANK_NEXTONMISS) {
-			swap->sequence = curseq + 1;
-		} else {
-			DRM_DEBUG("Missed target sequence\n");
-			drm_vblank_put(dev, pipe);
-			return -EINVAL;
-		}
-	}
-
-	vbl_swap = drm_calloc(1, sizeof(*vbl_swap), DRM_MEM_DRIVER);
-
-	if (!vbl_swap) {
-		DRM_ERROR("Failed to allocate memory to queue swap\n");
-		drm_vblank_put(dev, pipe);
-		return -ENOMEM;
-	}
-
-	vbl_swap->drw_id = swap->drawable;
-	vbl_swap->pipe = pipe;
-	vbl_swap->sequence = swap->sequence;
-
-	spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
-
-	list_for_each(list, &dev_priv->vbl_swaps.head) {
-		vbl_old = list_entry(list, drm_i915_vbl_swap_t, head);
-
-		if (vbl_old->drw_id == swap->drawable &&
-		    vbl_old->pipe == pipe &&
-		    vbl_old->sequence == swap->sequence) {
-			spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
-			drm_vblank_put(dev, pipe);
-			drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
-			DRM_DEBUG("Already scheduled\n");
-			return 0;
-		}
-	}
-
-	if (dev_priv->swaps_pending >= 10) {
-		DRM_DEBUG("Too many swaps queued\n");
-		DRM_DEBUG(" pipe 0: %d pipe 1: %d\n",
-			  drm_vblank_count(dev, 0),
-			  drm_vblank_count(dev, 1));
-
-		list_for_each(list, &dev_priv->vbl_swaps.head) {
-			vbl_old = list_entry(list, drm_i915_vbl_swap_t, head);
-			DRM_DEBUG("\tdrw %x pipe %d seq %x\n",
-				  vbl_old->drw_id, vbl_old->pipe,
-				  vbl_old->sequence);
-		}
-		spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
-		drm_vblank_put(dev, pipe);
-		drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
-		return -EBUSY;
-	}
-
-	list_add_tail(&vbl_swap->head, &dev_priv->vbl_swaps.head);
-	dev_priv->swaps_pending++;
-
-	spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
-
-	return 0;
+	/* The delayed swap mechanism was fundamentally racy, and has been
+	 * removed.  The model was that the client requested a delayed flip/swap
+	 * from the kernel, then waited for vblank before continuing to perform
+	 * rendering.  The problem was that the kernel might wake the client
+	 * up before it dispatched the vblank swap (since the lock has to be
+	 * held while touching the ringbuffer), in which case the client would
+	 * clear and start the next frame before the swap occurred, and
+	 * flicker would occur in addition to likely missing the vblank.
+	 *
+	 * In the absence of this ioctl, userland falls back to a correct path
+	 * of waiting for a vblank, then dispatching the swap on its own.
+	 * Context switching to userland and back is plenty fast enough for
+	 * meeting the requirements of vblank swapping.
+	 */
+	return -EINVAL;
 }
 
 /* drm_dma.h hooks
@@ -831,11 +487,6 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	int ret, num_pipes = 2;
 
-	spin_lock_init(&dev_priv->swaps_lock);
-	INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
-	INIT_WORK(&dev_priv->vblank_work, i915_vblank_work_handler);
-	dev_priv->swaps_pending = 0;
-
 	/* Set initial unmasked IRQs to just the selected vblank pipes. */
 	dev_priv->irq_mask_reg = ~0;
 
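
Note: the replacement comment in i915_vblank_swap() describes userland's fallback: wait for a vblank itself, then dispatch the swap from userspace. A minimal sketch of that fallback path, using libdrm's drmWaitVBlank() from xf86drm.h; the issue_swap() helper is hypothetical, standing in for whatever blit or flip the DDX actually emits:

/* Sketch of the userland fallback: block until the next vblank on the
 * chosen pipe, then issue the swap from userspace.
 */
#include <stdio.h>
#include <xf86drm.h>

static void issue_swap(int fd)
{
	/* Hypothetical stand-in: a real client would emit its blit/flip here. */
	(void)fd;
}

static int swap_on_vblank(int fd, int pipe)
{
	drmVBlank vbl;

	vbl.request.type = DRM_VBLANK_RELATIVE;
	if (pipe == 1)
		vbl.request.type |= DRM_VBLANK_SECONDARY;
	vbl.request.sequence = 1;	/* wake on the next vblank */

	if (drmWaitVBlank(fd, &vbl) != 0) {
		fprintf(stderr, "drmWaitVBlank failed\n");
		return -1;
	}

	issue_swap(fd);			/* dispatch the swap ourselves */
	return 0;
}

Since drmWaitVBlank() returns only after the vblank has occurred, the client cannot be woken before the swap is dispatched, which is exactly the race the removed kernel mechanism suffered from.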