author		Ingo Molnar <mingo@elte.hu>	2008-10-28 11:26:12 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-10-28 11:26:12 -0400
commit		7a9787e1eba95a166265e6a260cf30af04ef0a99 (patch)
tree		e730a4565e0318140d2fbd2f0415d18a339d7336 /drivers/gpu/drm/i915/i915_irq.c
parent		41b9eb264c8407655db57b60b4457fe1b2ec9977 (diff)
parent		0173a3265b228da319ceb9c1ec6a5682fd1b2d92 (diff)
Merge commit 'v2.6.28-rc2' into x86/pci-ioapic-boot-irq-quirks
Diffstat (limited to 'drivers/gpu/drm/i915/i915_irq.c')
-rw-r--r--	drivers/gpu/drm/i915/i915_irq.c	583
1 file changed, 422 insertions(+), 161 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index df036118b8b1..26f48932a51e 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -31,16 +31,62 @@ | |||
31 | #include "i915_drm.h" | 31 | #include "i915_drm.h" |
32 | #include "i915_drv.h" | 32 | #include "i915_drv.h" |
33 | 33 | ||
34 | #define USER_INT_FLAG (1<<1) | ||
35 | #define VSYNC_PIPEB_FLAG (1<<5) | ||
36 | #define VSYNC_PIPEA_FLAG (1<<7) | ||
37 | |||
38 | #define MAX_NOPID ((u32)~0) | 34 | #define MAX_NOPID ((u32)~0) |
39 | 35 | ||
36 | /** These are the interrupts used by the driver */ | ||
37 | #define I915_INTERRUPT_ENABLE_MASK (I915_USER_INTERRUPT | \ | ||
38 | I915_ASLE_INTERRUPT | \ | ||
39 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \ | ||
40 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT) | ||
41 | |||
42 | void | ||
43 | i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask) | ||
44 | { | ||
45 | if ((dev_priv->irq_mask_reg & mask) != 0) { | ||
46 | dev_priv->irq_mask_reg &= ~mask; | ||
47 | I915_WRITE(IMR, dev_priv->irq_mask_reg); | ||
48 | (void) I915_READ(IMR); | ||
49 | } | ||
50 | } | ||
51 | |||
52 | static inline void | ||
53 | i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask) | ||
54 | { | ||
55 | if ((dev_priv->irq_mask_reg & mask) != mask) { | ||
56 | dev_priv->irq_mask_reg |= mask; | ||
57 | I915_WRITE(IMR, dev_priv->irq_mask_reg); | ||
58 | (void) I915_READ(IMR); | ||
59 | } | ||
60 | } | ||
61 | |||
62 | /** | ||
63 | * i915_pipe_enabled - check if a pipe is enabled | ||
64 | * @dev: DRM device | ||
65 | * @pipe: pipe to check | ||
66 | * | ||
67 | * Reading certain registers when the pipe is disabled can hang the chip. | ||
68 | * Use this routine to make sure the PLL is running and the pipe is active | ||
69 | * before reading such registers if unsure. | ||
70 | */ | ||
71 | static int | ||
72 | i915_pipe_enabled(struct drm_device *dev, int pipe) | ||
73 | { | ||
74 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | ||
75 | unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF; | ||
76 | |||
77 | if (I915_READ(pipeconf) & PIPEACONF_ENABLE) | ||
78 | return 1; | ||
79 | |||
80 | return 0; | ||
81 | } | ||
82 | |||
40 | /** | 83 | /** |
41 | * Emit blits for scheduled buffer swaps. | 84 | * Emit blits for scheduled buffer swaps. |
42 | * | 85 | * |
43 | * This function will be called with the HW lock held. | 86 | * This function will be called with the HW lock held. |
87 | * Because this function must grab the ring mutex (dev->struct_mutex), | ||
88 | * it can no longer run at soft irq time. We'll fix this when we do | ||
89 | * the DRI2 swap buffer work. | ||
44 | */ | 90 | */ |
45 | static void i915_vblank_tasklet(struct drm_device *dev) | 91 | static void i915_vblank_tasklet(struct drm_device *dev) |
46 | { | 92 | { |
@@ -48,8 +94,7 @@ static void i915_vblank_tasklet(struct drm_device *dev) | |||
48 | unsigned long irqflags; | 94 | unsigned long irqflags; |
49 | struct list_head *list, *tmp, hits, *hit; | 95 | struct list_head *list, *tmp, hits, *hit; |
50 | int nhits, nrects, slice[2], upper[2], lower[2], i; | 96 | int nhits, nrects, slice[2], upper[2], lower[2], i; |
51 | unsigned counter[2] = { atomic_read(&dev->vbl_received), | 97 | unsigned counter[2]; |
52 | atomic_read(&dev->vbl_received2) }; | ||
53 | struct drm_drawable_info *drw; | 98 | struct drm_drawable_info *drw; |
54 | drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv; | 99 | drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv; |
55 | u32 cpp = dev_priv->cpp; | 100 | u32 cpp = dev_priv->cpp; |
@@ -62,6 +107,8 @@ static void i915_vblank_tasklet(struct drm_device *dev) | |||
62 | u32 ropcpp = (0xcc << 16) | ((cpp - 1) << 24); | 107 | u32 ropcpp = (0xcc << 16) | ((cpp - 1) << 24); |
63 | RING_LOCALS; | 108 | RING_LOCALS; |
64 | 109 | ||
110 | mutex_lock(&dev->struct_mutex); | ||
111 | |||
65 | if (IS_I965G(dev) && sarea_priv->front_tiled) { | 112 | if (IS_I965G(dev) && sarea_priv->front_tiled) { |
66 | cmd |= XY_SRC_COPY_BLT_DST_TILED; | 113 | cmd |= XY_SRC_COPY_BLT_DST_TILED; |
67 | dst_pitch >>= 2; | 114 | dst_pitch >>= 2; |
@@ -71,6 +118,9 @@ static void i915_vblank_tasklet(struct drm_device *dev) | |||
71 | src_pitch >>= 2; | 118 | src_pitch >>= 2; |
72 | } | 119 | } |
73 | 120 | ||
121 | counter[0] = drm_vblank_count(dev, 0); | ||
122 | counter[1] = drm_vblank_count(dev, 1); | ||
123 | |||
74 | DRM_DEBUG("\n"); | 124 | DRM_DEBUG("\n"); |
75 | 125 | ||
76 | INIT_LIST_HEAD(&hits); | 126 | INIT_LIST_HEAD(&hits); |
@@ -83,32 +133,33 @@ static void i915_vblank_tasklet(struct drm_device *dev) | |||
83 | list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) { | 133 | list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) { |
84 | drm_i915_vbl_swap_t *vbl_swap = | 134 | drm_i915_vbl_swap_t *vbl_swap = |
85 | list_entry(list, drm_i915_vbl_swap_t, head); | 135 | list_entry(list, drm_i915_vbl_swap_t, head); |
136 | int pipe = vbl_swap->pipe; | ||
86 | 137 | ||
87 | if ((counter[vbl_swap->pipe] - vbl_swap->sequence) > (1<<23)) | 138 | if ((counter[pipe] - vbl_swap->sequence) > (1<<23)) |
88 | continue; | 139 | continue; |
89 | 140 | ||
90 | list_del(list); | 141 | list_del(list); |
91 | dev_priv->swaps_pending--; | 142 | dev_priv->swaps_pending--; |
143 | drm_vblank_put(dev, pipe); | ||
92 | 144 | ||
93 | spin_unlock(&dev_priv->swaps_lock); | 145 | spin_unlock(&dev_priv->swaps_lock); |
94 | spin_lock(&dev->drw_lock); | 146 | spin_lock(&dev->drw_lock); |
95 | 147 | ||
96 | drw = drm_get_drawable_info(dev, vbl_swap->drw_id); | 148 | drw = drm_get_drawable_info(dev, vbl_swap->drw_id); |
97 | 149 | ||
98 | if (!drw) { | ||
99 | spin_unlock(&dev->drw_lock); | ||
100 | drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER); | ||
101 | spin_lock(&dev_priv->swaps_lock); | ||
102 | continue; | ||
103 | } | ||
104 | |||
105 | list_for_each(hit, &hits) { | 150 | list_for_each(hit, &hits) { |
106 | drm_i915_vbl_swap_t *swap_cmp = | 151 | drm_i915_vbl_swap_t *swap_cmp = |
107 | list_entry(hit, drm_i915_vbl_swap_t, head); | 152 | list_entry(hit, drm_i915_vbl_swap_t, head); |
108 | struct drm_drawable_info *drw_cmp = | 153 | struct drm_drawable_info *drw_cmp = |
109 | drm_get_drawable_info(dev, swap_cmp->drw_id); | 154 | drm_get_drawable_info(dev, swap_cmp->drw_id); |
110 | 155 | ||
111 | if (drw_cmp && | 156 | /* Make sure both drawables are still |
157 | * around and have some rectangles before | ||
158 | * we look inside to order them for the | ||
159 | * blts below. | ||
160 | */ | ||
161 | if (drw_cmp && drw_cmp->num_rects > 0 && | ||
162 | drw && drw->num_rects > 0 && | ||
112 | drw_cmp->rects[0].y1 > drw->rects[0].y1) { | 163 | drw_cmp->rects[0].y1 > drw->rects[0].y1) { |
113 | list_add_tail(list, hit); | 164 | list_add_tail(list, hit); |
114 | break; | 165 | break; |
@@ -128,6 +179,7 @@ static void i915_vblank_tasklet(struct drm_device *dev) | |||
128 | 179 | ||
129 | if (nhits == 0) { | 180 | if (nhits == 0) { |
130 | spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags); | 181 | spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags); |
182 | mutex_unlock(&dev->struct_mutex); | ||
131 | return; | 183 | return; |
132 | } | 184 | } |
133 | 185 | ||
@@ -186,6 +238,9 @@ static void i915_vblank_tasklet(struct drm_device *dev) | |||
186 | 238 | ||
187 | drw = drm_get_drawable_info(dev, swap_hit->drw_id); | 239 | drw = drm_get_drawable_info(dev, swap_hit->drw_id); |
188 | 240 | ||
241 | /* The drawable may have been destroyed since | ||
242 | * the vblank swap was queued | ||
243 | */ | ||
189 | if (!drw) | 244 | if (!drw) |
190 | continue; | 245 | continue; |
191 | 246 | ||
@@ -218,6 +273,7 @@ static void i915_vblank_tasklet(struct drm_device *dev) | |||
218 | } | 273 | } |
219 | 274 | ||
220 | spin_unlock_irqrestore(&dev->drw_lock, irqflags); | 275 | spin_unlock_irqrestore(&dev->drw_lock, irqflags); |
276 | mutex_unlock(&dev->struct_mutex); | ||
221 | 277 | ||
222 | list_for_each_safe(hit, tmp, &hits) { | 278 | list_for_each_safe(hit, tmp, &hits) { |
223 | drm_i915_vbl_swap_t *swap_hit = | 279 | drm_i915_vbl_swap_t *swap_hit = |
@@ -229,63 +285,157 @@ static void i915_vblank_tasklet(struct drm_device *dev) | |||
229 | } | 285 | } |
230 | } | 286 | } |
231 | 287 | ||
288 | /* Called from drm generic code, passed a 'crtc', which | ||
289 | * we use as a pipe index | ||
290 | */ | ||
291 | u32 i915_get_vblank_counter(struct drm_device *dev, int pipe) | ||
292 | { | ||
293 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | ||
294 | unsigned long high_frame; | ||
295 | unsigned long low_frame; | ||
296 | u32 high1, high2, low, count; | ||
297 | |||
298 | high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH; | ||
299 | low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL; | ||
300 | |||
301 | if (!i915_pipe_enabled(dev, pipe)) { | ||
302 | DRM_ERROR("trying to get vblank count for disabled pipe %d\n", pipe); | ||
303 | return 0; | ||
304 | } | ||
305 | |||
306 | /* | ||
307 | * High & low register fields aren't synchronized, so make sure | ||
308 | * we get a low value that's stable across two reads of the high | ||
309 | * register. | ||
310 | */ | ||
311 | do { | ||
312 | high1 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >> | ||
313 | PIPE_FRAME_HIGH_SHIFT); | ||
314 | low = ((I915_READ(low_frame) & PIPE_FRAME_LOW_MASK) >> | ||
315 | PIPE_FRAME_LOW_SHIFT); | ||
316 | high2 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >> | ||
317 | PIPE_FRAME_HIGH_SHIFT); | ||
318 | } while (high1 != high2); | ||
319 | |||
320 | count = (high1 << 8) | low; | ||
321 | |||
322 | return count; | ||
323 | } | ||
324 | |||
325 | void | ||
326 | i915_vblank_work_handler(struct work_struct *work) | ||
327 | { | ||
328 | drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, | ||
329 | vblank_work); | ||
330 | struct drm_device *dev = dev_priv->dev; | ||
331 | unsigned long irqflags; | ||
332 | |||
333 | if (dev->lock.hw_lock == NULL) { | ||
334 | i915_vblank_tasklet(dev); | ||
335 | return; | ||
336 | } | ||
337 | |||
338 | spin_lock_irqsave(&dev->tasklet_lock, irqflags); | ||
339 | dev->locked_tasklet_func = i915_vblank_tasklet; | ||
340 | spin_unlock_irqrestore(&dev->tasklet_lock, irqflags); | ||
341 | |||
342 | /* Try to get the lock now, if this fails, the lock | ||
343 | * holder will execute the tasklet during unlock | ||
344 | */ | ||
345 | if (!drm_lock_take(&dev->lock, DRM_KERNEL_CONTEXT)) | ||
346 | return; | ||
347 | |||
348 | dev->lock.lock_time = jiffies; | ||
349 | atomic_inc(&dev->counts[_DRM_STAT_LOCKS]); | ||
350 | |||
351 | spin_lock_irqsave(&dev->tasklet_lock, irqflags); | ||
352 | dev->locked_tasklet_func = NULL; | ||
353 | spin_unlock_irqrestore(&dev->tasklet_lock, irqflags); | ||
354 | |||
355 | i915_vblank_tasklet(dev); | ||
356 | drm_lock_free(&dev->lock, DRM_KERNEL_CONTEXT); | ||
357 | } | ||
358 | |||
232 | irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) | 359 | irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) |
233 | { | 360 | { |
234 | struct drm_device *dev = (struct drm_device *) arg; | 361 | struct drm_device *dev = (struct drm_device *) arg; |
235 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 362 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
236 | u16 temp; | 363 | u32 iir; |
237 | u32 pipea_stats, pipeb_stats; | 364 | u32 pipea_stats, pipeb_stats; |
365 | int vblank = 0; | ||
238 | 366 | ||
239 | pipea_stats = I915_READ(I915REG_PIPEASTAT); | 367 | atomic_inc(&dev_priv->irq_received); |
240 | pipeb_stats = I915_READ(I915REG_PIPEBSTAT); | ||
241 | 368 | ||
242 | temp = I915_READ16(I915REG_INT_IDENTITY_R); | 369 | if (dev->pdev->msi_enabled) |
370 | I915_WRITE(IMR, ~0); | ||
371 | iir = I915_READ(IIR); | ||
243 | 372 | ||
244 | temp &= (USER_INT_FLAG | VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG); | 373 | if (iir == 0) { |
374 | if (dev->pdev->msi_enabled) { | ||
375 | I915_WRITE(IMR, dev_priv->irq_mask_reg); | ||
376 | (void) I915_READ(IMR); | ||
377 | } | ||
378 | return IRQ_NONE; | ||
379 | } | ||
245 | 380 | ||
246 | DRM_DEBUG("%s flag=%08x\n", __FUNCTION__, temp); | 381 | /* |
382 | * Clear the PIPE(A|B)STAT regs before the IIR otherwise | ||
383 | * we may get extra interrupts. | ||
384 | */ | ||
385 | if (iir & I915_DISPLAY_PIPE_A_EVENT_INTERRUPT) { | ||
386 | pipea_stats = I915_READ(PIPEASTAT); | ||
387 | if (!(dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A)) | ||
388 | pipea_stats &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE | | ||
389 | PIPE_VBLANK_INTERRUPT_ENABLE); | ||
390 | else if (pipea_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS| | ||
391 | PIPE_VBLANK_INTERRUPT_STATUS)) { | ||
392 | vblank++; | ||
393 | drm_handle_vblank(dev, 0); | ||
394 | } | ||
247 | 395 | ||
248 | if (temp == 0) | 396 | I915_WRITE(PIPEASTAT, pipea_stats); |
249 | return IRQ_NONE; | 397 | } |
398 | if (iir & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT) { | ||
399 | pipeb_stats = I915_READ(PIPEBSTAT); | ||
400 | /* Ack the event */ | ||
401 | I915_WRITE(PIPEBSTAT, pipeb_stats); | ||
402 | |||
403 | /* The vblank interrupt gets enabled even if we didn't ask for | ||
404 | it, so make sure it's shut down again */ | ||
405 | if (!(dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B)) | ||
406 | pipeb_stats &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE | | ||
407 | PIPE_VBLANK_INTERRUPT_ENABLE); | ||
408 | else if (pipeb_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS| | ||
409 | PIPE_VBLANK_INTERRUPT_STATUS)) { | ||
410 | vblank++; | ||
411 | drm_handle_vblank(dev, 1); | ||
412 | } | ||
250 | 413 | ||
251 | I915_WRITE16(I915REG_INT_IDENTITY_R, temp); | 414 | if (pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) |
252 | (void) I915_READ16(I915REG_INT_IDENTITY_R); | 415 | opregion_asle_intr(dev); |
253 | DRM_READMEMORYBARRIER(); | 416 | I915_WRITE(PIPEBSTAT, pipeb_stats); |
417 | } | ||
254 | 418 | ||
255 | dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); | 419 | I915_WRITE(IIR, iir); |
420 | if (dev->pdev->msi_enabled) | ||
421 | I915_WRITE(IMR, dev_priv->irq_mask_reg); | ||
422 | (void) I915_READ(IIR); /* Flush posted writes */ | ||
256 | 423 | ||
257 | if (temp & USER_INT_FLAG) | 424 | if (dev_priv->sarea_priv) |
258 | DRM_WAKEUP(&dev_priv->irq_queue); | 425 | dev_priv->sarea_priv->last_dispatch = |
426 | READ_BREADCRUMB(dev_priv); | ||
259 | 427 | ||
260 | if (temp & (VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG)) { | 428 | if (iir & I915_USER_INTERRUPT) { |
261 | int vblank_pipe = dev_priv->vblank_pipe; | 429 | dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev); |
262 | 430 | DRM_WAKEUP(&dev_priv->irq_queue); | |
263 | if ((vblank_pipe & | ||
264 | (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B)) | ||
265 | == (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B)) { | ||
266 | if (temp & VSYNC_PIPEA_FLAG) | ||
267 | atomic_inc(&dev->vbl_received); | ||
268 | if (temp & VSYNC_PIPEB_FLAG) | ||
269 | atomic_inc(&dev->vbl_received2); | ||
270 | } else if (((temp & VSYNC_PIPEA_FLAG) && | ||
271 | (vblank_pipe & DRM_I915_VBLANK_PIPE_A)) || | ||
272 | ((temp & VSYNC_PIPEB_FLAG) && | ||
273 | (vblank_pipe & DRM_I915_VBLANK_PIPE_B))) | ||
274 | atomic_inc(&dev->vbl_received); | ||
275 | |||
276 | DRM_WAKEUP(&dev->vbl_queue); | ||
277 | drm_vbl_send_signals(dev); | ||
278 | |||
279 | if (dev_priv->swaps_pending > 0) | ||
280 | drm_locked_tasklet(dev, i915_vblank_tasklet); | ||
281 | I915_WRITE(I915REG_PIPEASTAT, | ||
282 | pipea_stats|I915_VBLANK_INTERRUPT_ENABLE| | ||
283 | I915_VBLANK_CLEAR); | ||
284 | I915_WRITE(I915REG_PIPEBSTAT, | ||
285 | pipeb_stats|I915_VBLANK_INTERRUPT_ENABLE| | ||
286 | I915_VBLANK_CLEAR); | ||
287 | } | 431 | } |
288 | 432 | ||
433 | if (iir & I915_ASLE_INTERRUPT) | ||
434 | opregion_asle_intr(dev); | ||
435 | |||
436 | if (vblank && dev_priv->swaps_pending > 0) | ||
437 | schedule_work(&dev_priv->vblank_work); | ||
438 | |||
289 | return IRQ_HANDLED; | 439 | return IRQ_HANDLED; |
290 | } | 440 | } |
291 | 441 | ||
@@ -298,23 +448,47 @@ static int i915_emit_irq(struct drm_device * dev) | |||
298 | 448 | ||
299 | DRM_DEBUG("\n"); | 449 | DRM_DEBUG("\n"); |
300 | 450 | ||
301 | dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter; | 451 | dev_priv->counter++; |
302 | |||
303 | if (dev_priv->counter > 0x7FFFFFFFUL) | 452 | if (dev_priv->counter > 0x7FFFFFFFUL) |
304 | dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1; | 453 | dev_priv->counter = 1; |
454 | if (dev_priv->sarea_priv) | ||
455 | dev_priv->sarea_priv->last_enqueue = dev_priv->counter; | ||
305 | 456 | ||
306 | BEGIN_LP_RING(6); | 457 | BEGIN_LP_RING(6); |
307 | OUT_RING(CMD_STORE_DWORD_IDX); | 458 | OUT_RING(MI_STORE_DWORD_INDEX); |
308 | OUT_RING(20); | 459 | OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT); |
309 | OUT_RING(dev_priv->counter); | 460 | OUT_RING(dev_priv->counter); |
310 | OUT_RING(0); | 461 | OUT_RING(0); |
311 | OUT_RING(0); | 462 | OUT_RING(0); |
312 | OUT_RING(GFX_OP_USER_INTERRUPT); | 463 | OUT_RING(MI_USER_INTERRUPT); |
313 | ADVANCE_LP_RING(); | 464 | ADVANCE_LP_RING(); |
314 | 465 | ||
315 | return dev_priv->counter; | 466 | return dev_priv->counter; |
316 | } | 467 | } |
317 | 468 | ||
469 | void i915_user_irq_get(struct drm_device *dev) | ||
470 | { | ||
471 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | ||
472 | unsigned long irqflags; | ||
473 | |||
474 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); | ||
475 | if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) | ||
476 | i915_enable_irq(dev_priv, I915_USER_INTERRUPT); | ||
477 | spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); | ||
478 | } | ||
479 | |||
480 | void i915_user_irq_put(struct drm_device *dev) | ||
481 | { | ||
482 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | ||
483 | unsigned long irqflags; | ||
484 | |||
485 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); | ||
486 | BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0); | ||
487 | if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) | ||
488 | i915_disable_irq(dev_priv, I915_USER_INTERRUPT); | ||
489 | spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); | ||
490 | } | ||
491 | |||
318 | static int i915_wait_irq(struct drm_device * dev, int irq_nr) | 492 | static int i915_wait_irq(struct drm_device * dev, int irq_nr) |
319 | { | 493 | { |
320 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 494 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
@@ -323,55 +497,34 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr) | |||
323 | DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr, | 497 | DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr, |
324 | READ_BREADCRUMB(dev_priv)); | 498 | READ_BREADCRUMB(dev_priv)); |
325 | 499 | ||
326 | if (READ_BREADCRUMB(dev_priv) >= irq_nr) | 500 | if (READ_BREADCRUMB(dev_priv) >= irq_nr) { |
501 | if (dev_priv->sarea_priv) { | ||
502 | dev_priv->sarea_priv->last_dispatch = | ||
503 | READ_BREADCRUMB(dev_priv); | ||
504 | } | ||
327 | return 0; | 505 | return 0; |
506 | } | ||
328 | 507 | ||
329 | dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; | 508 | if (dev_priv->sarea_priv) |
509 | dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; | ||
330 | 510 | ||
511 | i915_user_irq_get(dev); | ||
331 | DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ, | 512 | DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ, |
332 | READ_BREADCRUMB(dev_priv) >= irq_nr); | 513 | READ_BREADCRUMB(dev_priv) >= irq_nr); |
514 | i915_user_irq_put(dev); | ||
333 | 515 | ||
334 | if (ret == -EBUSY) { | 516 | if (ret == -EBUSY) { |
335 | DRM_ERROR("EBUSY -- rec: %d emitted: %d\n", | 517 | DRM_ERROR("EBUSY -- rec: %d emitted: %d\n", |
336 | READ_BREADCRUMB(dev_priv), (int)dev_priv->counter); | 518 | READ_BREADCRUMB(dev_priv), (int)dev_priv->counter); |
337 | } | 519 | } |
338 | 520 | ||
339 | dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); | 521 | if (dev_priv->sarea_priv) |
340 | return ret; | 522 | dev_priv->sarea_priv->last_dispatch = |
341 | } | 523 | READ_BREADCRUMB(dev_priv); |
342 | |||
343 | static int i915_driver_vblank_do_wait(struct drm_device *dev, unsigned int *sequence, | ||
344 | atomic_t *counter) | ||
345 | { | ||
346 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
347 | unsigned int cur_vblank; | ||
348 | int ret = 0; | ||
349 | |||
350 | if (!dev_priv) { | ||
351 | DRM_ERROR("called with no initialization\n"); | ||
352 | return -EINVAL; | ||
353 | } | ||
354 | |||
355 | DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ, | ||
356 | (((cur_vblank = atomic_read(counter)) | ||
357 | - *sequence) <= (1<<23))); | ||
358 | |||
359 | *sequence = cur_vblank; | ||
360 | 524 | ||
361 | return ret; | 525 | return ret; |
362 | } | 526 | } |
363 | 527 | ||
364 | |||
365 | int i915_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence) | ||
366 | { | ||
367 | return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received); | ||
368 | } | ||
369 | |||
370 | int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence) | ||
371 | { | ||
372 | return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received2); | ||
373 | } | ||
374 | |||
375 | /* Needs the lock as it touches the ring. | 528 | /* Needs the lock as it touches the ring. |
376 | */ | 529 | */ |
377 | int i915_irq_emit(struct drm_device *dev, void *data, | 530 | int i915_irq_emit(struct drm_device *dev, void *data, |
@@ -381,14 +534,15 @@ int i915_irq_emit(struct drm_device *dev, void *data, | |||
381 | drm_i915_irq_emit_t *emit = data; | 534 | drm_i915_irq_emit_t *emit = data; |
382 | int result; | 535 | int result; |
383 | 536 | ||
384 | LOCK_TEST_WITH_RETURN(dev, file_priv); | 537 | RING_LOCK_TEST_WITH_RETURN(dev, file_priv); |
385 | 538 | ||
386 | if (!dev_priv) { | 539 | if (!dev_priv) { |
387 | DRM_ERROR("called with no initialization\n"); | 540 | DRM_ERROR("called with no initialization\n"); |
388 | return -EINVAL; | 541 | return -EINVAL; |
389 | } | 542 | } |
390 | 543 | mutex_lock(&dev->struct_mutex); | |
391 | result = i915_emit_irq(dev); | 544 | result = i915_emit_irq(dev); |
545 | mutex_unlock(&dev->struct_mutex); | ||
392 | 546 | ||
393 | if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) { | 547 | if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) { |
394 | DRM_ERROR("copy_to_user\n"); | 548 | DRM_ERROR("copy_to_user\n"); |
@@ -414,18 +568,95 @@ int i915_irq_wait(struct drm_device *dev, void *data, | |||
414 | return i915_wait_irq(dev, irqwait->irq_seq); | 568 | return i915_wait_irq(dev, irqwait->irq_seq); |
415 | } | 569 | } |
416 | 570 | ||
417 | static void i915_enable_interrupt (struct drm_device *dev) | 571 | /* Called from drm generic code, passed 'crtc' which |
572 | * we use as a pipe index | ||
573 | */ | ||
574 | int i915_enable_vblank(struct drm_device *dev, int pipe) | ||
418 | { | 575 | { |
419 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 576 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
420 | u16 flag; | 577 | u32 pipestat_reg = 0; |
578 | u32 pipestat; | ||
579 | u32 interrupt = 0; | ||
580 | unsigned long irqflags; | ||
421 | 581 | ||
422 | flag = 0; | 582 | switch (pipe) { |
423 | if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A) | 583 | case 0: |
424 | flag |= VSYNC_PIPEA_FLAG; | 584 | pipestat_reg = PIPEASTAT; |
425 | if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B) | 585 | interrupt = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT; |
426 | flag |= VSYNC_PIPEB_FLAG; | 586 | break; |
587 | case 1: | ||
588 | pipestat_reg = PIPEBSTAT; | ||
589 | interrupt = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; | ||
590 | break; | ||
591 | default: | ||
592 | DRM_ERROR("tried to enable vblank on non-existent pipe %d\n", | ||
593 | pipe); | ||
594 | return 0; | ||
595 | } | ||
427 | 596 | ||
428 | I915_WRITE16(I915REG_INT_ENABLE_R, USER_INT_FLAG | flag); | 597 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); |
598 | /* Enabling vblank events in IMR comes before PIPESTAT write, or | ||
599 | * there's a race where the PIPESTAT vblank bit gets set to 1, so | ||
600 | * the OR of enabled PIPESTAT bits goes to 1, so the PIPExEVENT in | ||
601 | * ISR flashes to 1, but the IIR bit doesn't get set to 1 because | ||
602 | * IMR masks it. It doesn't ever get set after we clear the masking | ||
603 | * in IMR because the ISR bit is edge, not level-triggered, on the | ||
604 | * OR of PIPESTAT bits. | ||
605 | */ | ||
606 | i915_enable_irq(dev_priv, interrupt); | ||
607 | pipestat = I915_READ(pipestat_reg); | ||
608 | if (IS_I965G(dev)) | ||
609 | pipestat |= PIPE_START_VBLANK_INTERRUPT_ENABLE; | ||
610 | else | ||
611 | pipestat |= PIPE_VBLANK_INTERRUPT_ENABLE; | ||
612 | /* Clear any stale interrupt status */ | ||
613 | pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS | | ||
614 | PIPE_VBLANK_INTERRUPT_STATUS); | ||
615 | I915_WRITE(pipestat_reg, pipestat); | ||
616 | (void) I915_READ(pipestat_reg); /* Posting read */ | ||
617 | spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); | ||
618 | |||
619 | return 0; | ||
620 | } | ||
621 | |||
622 | /* Called from drm generic code, passed 'crtc' which | ||
623 | * we use as a pipe index | ||
624 | */ | ||
625 | void i915_disable_vblank(struct drm_device *dev, int pipe) | ||
626 | { | ||
627 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | ||
628 | u32 pipestat_reg = 0; | ||
629 | u32 pipestat; | ||
630 | u32 interrupt = 0; | ||
631 | unsigned long irqflags; | ||
632 | |||
633 | switch (pipe) { | ||
634 | case 0: | ||
635 | pipestat_reg = PIPEASTAT; | ||
636 | interrupt = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT; | ||
637 | break; | ||
638 | case 1: | ||
639 | pipestat_reg = PIPEBSTAT; | ||
640 | interrupt = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; | ||
641 | break; | ||
642 | default: | ||
643 | DRM_ERROR("tried to disable vblank on non-existent pipe %d\n", | ||
644 | pipe); | ||
645 | return; | ||
646 | break; | ||
647 | } | ||
648 | |||
649 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); | ||
650 | i915_disable_irq(dev_priv, interrupt); | ||
651 | pipestat = I915_READ(pipestat_reg); | ||
652 | pipestat &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE | | ||
653 | PIPE_VBLANK_INTERRUPT_ENABLE); | ||
654 | /* Clear any stale interrupt status */ | ||
655 | pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS | | ||
656 | PIPE_VBLANK_INTERRUPT_STATUS); | ||
657 | I915_WRITE(pipestat_reg, pipestat); | ||
658 | (void) I915_READ(pipestat_reg); /* Posting read */ | ||
659 | spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); | ||
429 | } | 660 | } |
430 | 661 | ||
431 | /* Set the vblank monitor pipe | 662 | /* Set the vblank monitor pipe |
@@ -434,22 +665,12 @@ int i915_vblank_pipe_set(struct drm_device *dev, void *data, | |||
434 | struct drm_file *file_priv) | 665 | struct drm_file *file_priv) |
435 | { | 666 | { |
436 | drm_i915_private_t *dev_priv = dev->dev_private; | 667 | drm_i915_private_t *dev_priv = dev->dev_private; |
437 | drm_i915_vblank_pipe_t *pipe = data; | ||
438 | 668 | ||
439 | if (!dev_priv) { | 669 | if (!dev_priv) { |
440 | DRM_ERROR("called with no initialization\n"); | 670 | DRM_ERROR("called with no initialization\n"); |
441 | return -EINVAL; | 671 | return -EINVAL; |
442 | } | 672 | } |
443 | 673 | ||
444 | if (pipe->pipe & ~(DRM_I915_VBLANK_PIPE_A|DRM_I915_VBLANK_PIPE_B)) { | ||
445 | DRM_ERROR("called with invalid pipe 0x%x\n", pipe->pipe); | ||
446 | return -EINVAL; | ||
447 | } | ||
448 | |||
449 | dev_priv->vblank_pipe = pipe->pipe; | ||
450 | |||
451 | i915_enable_interrupt (dev); | ||
452 | |||
453 | return 0; | 674 | return 0; |
454 | } | 675 | } |
455 | 676 | ||
@@ -458,19 +679,13 @@ int i915_vblank_pipe_get(struct drm_device *dev, void *data, | |||
458 | { | 679 | { |
459 | drm_i915_private_t *dev_priv = dev->dev_private; | 680 | drm_i915_private_t *dev_priv = dev->dev_private; |
460 | drm_i915_vblank_pipe_t *pipe = data; | 681 | drm_i915_vblank_pipe_t *pipe = data; |
461 | u16 flag; | ||
462 | 682 | ||
463 | if (!dev_priv) { | 683 | if (!dev_priv) { |
464 | DRM_ERROR("called with no initialization\n"); | 684 | DRM_ERROR("called with no initialization\n"); |
465 | return -EINVAL; | 685 | return -EINVAL; |
466 | } | 686 | } |
467 | 687 | ||
468 | flag = I915_READ(I915REG_INT_ENABLE_R); | 688 | pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; |
469 | pipe->pipe = 0; | ||
470 | if (flag & VSYNC_PIPEA_FLAG) | ||
471 | pipe->pipe |= DRM_I915_VBLANK_PIPE_A; | ||
472 | if (flag & VSYNC_PIPEB_FLAG) | ||
473 | pipe->pipe |= DRM_I915_VBLANK_PIPE_B; | ||
474 | 689 | ||
475 | return 0; | 690 | return 0; |
476 | } | 691 | } |
@@ -483,12 +698,13 @@ int i915_vblank_swap(struct drm_device *dev, void *data, | |||
483 | { | 698 | { |
484 | drm_i915_private_t *dev_priv = dev->dev_private; | 699 | drm_i915_private_t *dev_priv = dev->dev_private; |
485 | drm_i915_vblank_swap_t *swap = data; | 700 | drm_i915_vblank_swap_t *swap = data; |
486 | drm_i915_vbl_swap_t *vbl_swap; | 701 | drm_i915_vbl_swap_t *vbl_swap, *vbl_old; |
487 | unsigned int pipe, seqtype, curseq; | 702 | unsigned int pipe, seqtype, curseq; |
488 | unsigned long irqflags; | 703 | unsigned long irqflags; |
489 | struct list_head *list; | 704 | struct list_head *list; |
705 | int ret; | ||
490 | 706 | ||
491 | if (!dev_priv) { | 707 | if (!dev_priv || !dev_priv->sarea_priv) { |
492 | DRM_ERROR("%s called with no initialization\n", __func__); | 708 | DRM_ERROR("%s called with no initialization\n", __func__); |
493 | return -EINVAL; | 709 | return -EINVAL; |
494 | } | 710 | } |
@@ -523,7 +739,14 @@ int i915_vblank_swap(struct drm_device *dev, void *data, | |||
523 | 739 | ||
524 | spin_unlock_irqrestore(&dev->drw_lock, irqflags); | 740 | spin_unlock_irqrestore(&dev->drw_lock, irqflags); |
525 | 741 | ||
526 | curseq = atomic_read(pipe ? &dev->vbl_received2 : &dev->vbl_received); | 742 | /* |
743 | * We take the ref here and put it when the swap actually completes | ||
744 | * in the tasklet. | ||
745 | */ | ||
746 | ret = drm_vblank_get(dev, pipe); | ||
747 | if (ret) | ||
748 | return ret; | ||
749 | curseq = drm_vblank_count(dev, pipe); | ||
527 | 750 | ||
528 | if (seqtype == _DRM_VBLANK_RELATIVE) | 751 | if (seqtype == _DRM_VBLANK_RELATIVE) |
529 | swap->sequence += curseq; | 752 | swap->sequence += curseq; |
@@ -533,46 +756,57 @@ int i915_vblank_swap(struct drm_device *dev, void *data, | |||
533 | swap->sequence = curseq + 1; | 756 | swap->sequence = curseq + 1; |
534 | } else { | 757 | } else { |
535 | DRM_DEBUG("Missed target sequence\n"); | 758 | DRM_DEBUG("Missed target sequence\n"); |
759 | drm_vblank_put(dev, pipe); | ||
536 | return -EINVAL; | 760 | return -EINVAL; |
537 | } | 761 | } |
538 | } | 762 | } |
539 | 763 | ||
764 | vbl_swap = drm_calloc(1, sizeof(*vbl_swap), DRM_MEM_DRIVER); | ||
765 | |||
766 | if (!vbl_swap) { | ||
767 | DRM_ERROR("Failed to allocate memory to queue swap\n"); | ||
768 | drm_vblank_put(dev, pipe); | ||
769 | return -ENOMEM; | ||
770 | } | ||
771 | |||
772 | vbl_swap->drw_id = swap->drawable; | ||
773 | vbl_swap->pipe = pipe; | ||
774 | vbl_swap->sequence = swap->sequence; | ||
775 | |||
540 | spin_lock_irqsave(&dev_priv->swaps_lock, irqflags); | 776 | spin_lock_irqsave(&dev_priv->swaps_lock, irqflags); |
541 | 777 | ||
542 | list_for_each(list, &dev_priv->vbl_swaps.head) { | 778 | list_for_each(list, &dev_priv->vbl_swaps.head) { |
543 | vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head); | 779 | vbl_old = list_entry(list, drm_i915_vbl_swap_t, head); |
544 | 780 | ||
545 | if (vbl_swap->drw_id == swap->drawable && | 781 | if (vbl_old->drw_id == swap->drawable && |
546 | vbl_swap->pipe == pipe && | 782 | vbl_old->pipe == pipe && |
547 | vbl_swap->sequence == swap->sequence) { | 783 | vbl_old->sequence == swap->sequence) { |
548 | spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags); | 784 | spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags); |
785 | drm_vblank_put(dev, pipe); | ||
786 | drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER); | ||
549 | DRM_DEBUG("Already scheduled\n"); | 787 | DRM_DEBUG("Already scheduled\n"); |
550 | return 0; | 788 | return 0; |
551 | } | 789 | } |
552 | } | 790 | } |
553 | 791 | ||
554 | spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags); | 792 | if (dev_priv->swaps_pending >= 10) { |
555 | |||
556 | if (dev_priv->swaps_pending >= 100) { | ||
557 | DRM_DEBUG("Too many swaps queued\n"); | 793 | DRM_DEBUG("Too many swaps queued\n"); |
794 | DRM_DEBUG(" pipe 0: %d pipe 1: %d\n", | ||
795 | drm_vblank_count(dev, 0), | ||
796 | drm_vblank_count(dev, 1)); | ||
797 | |||
798 | list_for_each(list, &dev_priv->vbl_swaps.head) { | ||
799 | vbl_old = list_entry(list, drm_i915_vbl_swap_t, head); | ||
800 | DRM_DEBUG("\tdrw %x pipe %d seq %x\n", | ||
801 | vbl_old->drw_id, vbl_old->pipe, | ||
802 | vbl_old->sequence); | ||
803 | } | ||
804 | spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags); | ||
805 | drm_vblank_put(dev, pipe); | ||
806 | drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER); | ||
558 | return -EBUSY; | 807 | return -EBUSY; |
559 | } | 808 | } |
560 | 809 | ||
561 | vbl_swap = drm_calloc(1, sizeof(*vbl_swap), DRM_MEM_DRIVER); | ||
562 | |||
563 | if (!vbl_swap) { | ||
564 | DRM_ERROR("Failed to allocate memory to queue swap\n"); | ||
565 | return -ENOMEM; | ||
566 | } | ||
567 | |||
568 | DRM_DEBUG("\n"); | ||
569 | |||
570 | vbl_swap->drw_id = swap->drawable; | ||
571 | vbl_swap->pipe = pipe; | ||
572 | vbl_swap->sequence = swap->sequence; | ||
573 | |||
574 | spin_lock_irqsave(&dev_priv->swaps_lock, irqflags); | ||
575 | |||
576 | list_add_tail(&vbl_swap->head, &dev_priv->vbl_swaps.head); | 810 | list_add_tail(&vbl_swap->head, &dev_priv->vbl_swaps.head); |
577 | dev_priv->swaps_pending++; | 811 | dev_priv->swaps_pending++; |
578 | 812 | ||
@@ -587,37 +821,64 @@ void i915_driver_irq_preinstall(struct drm_device * dev) | |||
587 | { | 821 | { |
588 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 822 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
589 | 823 | ||
590 | I915_WRITE16(I915REG_HWSTAM, 0xfffe); | 824 | I915_WRITE(HWSTAM, 0xeffe); |
591 | I915_WRITE16(I915REG_INT_MASK_R, 0x0); | 825 | I915_WRITE(IMR, 0xffffffff); |
592 | I915_WRITE16(I915REG_INT_ENABLE_R, 0x0); | 826 | I915_WRITE(IER, 0x0); |
593 | } | 827 | } |
594 | 828 | ||
595 | void i915_driver_irq_postinstall(struct drm_device * dev) | 829 | int i915_driver_irq_postinstall(struct drm_device *dev) |
596 | { | 830 | { |
597 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 831 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
832 | int ret, num_pipes = 2; | ||
598 | 833 | ||
599 | spin_lock_init(&dev_priv->swaps_lock); | 834 | spin_lock_init(&dev_priv->swaps_lock); |
600 | INIT_LIST_HEAD(&dev_priv->vbl_swaps.head); | 835 | INIT_LIST_HEAD(&dev_priv->vbl_swaps.head); |
836 | INIT_WORK(&dev_priv->vblank_work, i915_vblank_work_handler); | ||
601 | dev_priv->swaps_pending = 0; | 837 | dev_priv->swaps_pending = 0; |
602 | 838 | ||
603 | if (!dev_priv->vblank_pipe) | 839 | /* Set initial unmasked IRQs to just the selected vblank pipes. */ |
604 | dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A; | 840 | dev_priv->irq_mask_reg = ~0; |
605 | i915_enable_interrupt(dev); | 841 | |
842 | ret = drm_vblank_init(dev, num_pipes); | ||
843 | if (ret) | ||
844 | return ret; | ||
845 | |||
846 | dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; | ||
847 | dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; | ||
848 | dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; | ||
849 | |||
850 | dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ | ||
851 | |||
852 | dev_priv->irq_mask_reg &= I915_INTERRUPT_ENABLE_MASK; | ||
853 | |||
854 | I915_WRITE(IMR, dev_priv->irq_mask_reg); | ||
855 | I915_WRITE(IER, I915_INTERRUPT_ENABLE_MASK); | ||
856 | (void) I915_READ(IER); | ||
857 | |||
858 | opregion_enable_asle(dev); | ||
606 | DRM_INIT_WAITQUEUE(&dev_priv->irq_queue); | 859 | DRM_INIT_WAITQUEUE(&dev_priv->irq_queue); |
860 | |||
861 | return 0; | ||
607 | } | 862 | } |
608 | 863 | ||
609 | void i915_driver_irq_uninstall(struct drm_device * dev) | 864 | void i915_driver_irq_uninstall(struct drm_device * dev) |
610 | { | 865 | { |
611 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 866 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
612 | u16 temp; | 867 | u32 temp; |
613 | 868 | ||
614 | if (!dev_priv) | 869 | if (!dev_priv) |
615 | return; | 870 | return; |
616 | 871 | ||
617 | I915_WRITE16(I915REG_HWSTAM, 0xffff); | 872 | dev_priv->vblank_pipe = 0; |
618 | I915_WRITE16(I915REG_INT_MASK_R, 0xffff); | 873 | |
619 | I915_WRITE16(I915REG_INT_ENABLE_R, 0x0); | 874 | I915_WRITE(HWSTAM, 0xffffffff); |
875 | I915_WRITE(IMR, 0xffffffff); | ||
876 | I915_WRITE(IER, 0x0); | ||
620 | 877 | ||
621 | temp = I915_READ16(I915REG_INT_IDENTITY_R); | 878 | temp = I915_READ(PIPEASTAT); |
622 | I915_WRITE16(I915REG_INT_IDENTITY_R, temp); | 879 | I915_WRITE(PIPEASTAT, temp); |
880 | temp = I915_READ(PIPEBSTAT); | ||
881 | I915_WRITE(PIPEBSTAT, temp); | ||
882 | temp = I915_READ(IIR); | ||
883 | I915_WRITE(IIR, temp); | ||
623 | } | 884 | } |