-rw-r--r--   drivers/gpu/drm/drm_irq.c       |  20
-rw-r--r--   drivers/gpu/drm/i915/i915_dma.c |  21
-rw-r--r--   drivers/gpu/drm/i915/i915_drv.h |   6
-rw-r--r--   drivers/gpu/drm/i915/i915_irq.c | 153
4 files changed, 145 insertions(+), 55 deletions(-)
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 53f0e5af1cc8..61ed5158f783 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -63,7 +63,7 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
 	    p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn))
 		return -EINVAL;
 
-	p->irq = dev->irq;
+	p->irq = dev->pdev->irq;
 
 	DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
 		  p->irq);
@@ -89,7 +89,7 @@ static int drm_irq_install(struct drm_device * dev)
 	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
 		return -EINVAL;
 
-	if (dev->irq == 0)
+	if (dev->pdev->irq == 0)
 		return -EINVAL;
 
 	mutex_lock(&dev->struct_mutex);
@@ -107,7 +107,7 @@ static int drm_irq_install(struct drm_device * dev)
 	dev->irq_enabled = 1;
 	mutex_unlock(&dev->struct_mutex);
 
-	DRM_DEBUG("irq=%d\n", dev->irq);
+	DRM_DEBUG("irq=%d\n", dev->pdev->irq);
 
 	if (drm_core_check_feature(dev, DRIVER_IRQ_VBL)) {
 		init_waitqueue_head(&dev->vbl_queue);
@@ -127,8 +127,12 @@ static int drm_irq_install(struct drm_device * dev)
 	if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED))
 		sh_flags = IRQF_SHARED;
 
-	ret = request_irq(dev->irq, dev->driver->irq_handler,
+	ret = request_irq(dev->pdev->irq, dev->driver->irq_handler,
 			  sh_flags, dev->devname, dev);
+	/* Expose the device irq number to drivers that want to export it for
+	 * whatever reason.
+	 */
+	dev->irq = dev->pdev->irq;
 	if (ret < 0) {
 		mutex_lock(&dev->struct_mutex);
 		dev->irq_enabled = 0;
@@ -164,11 +168,11 @@ int drm_irq_uninstall(struct drm_device * dev)
 	if (!irq_enabled)
 		return -EINVAL;
 
-	DRM_DEBUG("irq=%d\n", dev->irq);
+	DRM_DEBUG("irq=%d\n", dev->pdev->irq);
 
 	dev->driver->irq_uninstall(dev);
 
-	free_irq(dev->irq, dev);
+	free_irq(dev->pdev->irq, dev);
 
 	dev->locked_tasklet_func = NULL;
 
@@ -201,7 +205,7 @@ int drm_control(struct drm_device *dev, void *data,
 		if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
 			return 0;
 		if (dev->if_version < DRM_IF_VERSION(1, 2) &&
-		    ctl->irq != dev->irq)
+		    ctl->irq != dev->pdev->irq)
 			return -EINVAL;
 		return drm_irq_install(dev);
 	case DRM_UNINST_HANDLER:
@@ -239,7 +243,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_priv)
 	int ret = 0;
 	unsigned int flags, seq;
 
-	if ((!dev->irq) || (!dev->irq_enabled))
+	if ((!dev->pdev->irq) || (!dev->irq_enabled))
 		return -EINVAL;
 
 	if (vblwait->request.type &
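The drm_irq.c hunks all follow one pattern: the interrupt number is taken from the PCI device (dev->pdev->irq) wherever the core validates, requests, or frees the IRQ, and dev->irq is reduced to an exported copy that is filled in only once the handler has been requested. A minimal user-space sketch of that ordering, with stub types and a stub request_irq() standing in for the kernel API (none of these names are DRM's own):

#include <stdio.h>

struct pci_dev { int irq; };
struct drm_device {
	struct pci_dev *pdev;
	int irq;            /* exported copy, filled in during installation */
	int irq_enabled;
};

/* Stub playing the role of request_irq(); succeeds for any nonzero IRQ. */
static int request_irq_stub(int irq) { return irq ? 0 : -1; }

static int irq_install(struct drm_device *dev)
{
	int ret;

	if (dev->pdev->irq == 0)        /* validate against the PCI device, not dev->irq */
		return -1;

	ret = request_irq_stub(dev->pdev->irq);
	/* Expose the number for anything that still reads dev->irq. */
	dev->irq = dev->pdev->irq;
	if (ret < 0)
		return ret;

	dev->irq_enabled = 1;
	return 0;
}

int main(void)
{
	struct pci_dev pdev = { .irq = 16 };
	struct drm_device dev = { .pdev = &pdev };

	printf("install: %d, dev.irq=%d\n", irq_install(&dev), dev.irq);
	return 0;
}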
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 4d56dfd25e04..27a1f78a7f1a 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -84,7 +84,7 @@ static int i915_dma_cleanup(struct drm_device * dev)
 	 * may not have been called from userspace and after dev_private
 	 * is freed, it's too late.
 	 */
-	if (dev->irq)
+	if (dev->irq_enabled)
 		drm_irq_uninstall(dev);
 
 	if (dev_priv->ring.virtual_start) {
@@ -644,7 +644,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
 
 	switch (param->param) {
 	case I915_PARAM_IRQ_ACTIVE:
-		value = dev->irq ? 1 : 0;
+		value = dev->irq_enabled;
 		break;
 	case I915_PARAM_ALLOW_BATCHBUFFER:
 		value = dev_priv->allow_batchbuffer ? 1 : 0;
@@ -763,6 +763,20 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	ret = drm_addmap(dev, base, size, _DRM_REGISTERS,
 			 _DRM_KERNEL | _DRM_DRIVER,
 			 &dev_priv->mmio_map);
+
+
+	/* On the 945G/GM, the chipset reports the MSI capability on the
+	 * integrated graphics even though the support isn't actually there
+	 * according to the published specs. It doesn't appear to function
+	 * correctly in testing on 945G.
+	 * This may be a side effect of MSI having been made available for PEG
+	 * and the registers being closely associated.
+	 */
+	if (!IS_I945G(dev) && !IS_I945GM(dev))
+		pci_enable_msi(dev->pdev);
+
+	spin_lock_init(&dev_priv->user_irq_lock);
+
 	return ret;
 }
 
@@ -770,6 +784,9 @@ int i915_driver_unload(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
+	if (dev->pdev->msi_enabled)
+		pci_disable_msi(dev->pdev);
+
 	if (dev_priv->mmio_map)
 		drm_rmmap(dev, dev_priv->mmio_map);
 
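The load/unload pairing above is the usual quirk-gated MSI setup: enable MSI unless the device is on a known-broken list (945G/945GM here), and on teardown disable it only if the core actually reports it enabled. A stand-alone sketch of that shape; the device IDs, struct, and fake_* helpers below are illustrative stand-ins, not the real PCI API:

#include <stdbool.h>
#include <stdio.h>

struct fake_pci_dev {
	unsigned short device;   /* PCI device ID */
	bool msi_enabled;
};

/* Stand-ins for pci_enable_msi()/pci_disable_msi(). */
static void fake_enable_msi(struct fake_pci_dev *p)  { p->msi_enabled = true; }
static void fake_disable_msi(struct fake_pci_dev *p) { p->msi_enabled = false; }

/* Hypothetical deny-list check playing the role of IS_I945G()/IS_I945GM(). */
static bool msi_is_broken(const struct fake_pci_dev *p)
{
	return p->device == 0x2772 || p->device == 0x27a2;   /* example IDs only */
}

static void driver_load(struct fake_pci_dev *p)
{
	if (!msi_is_broken(p))
		fake_enable_msi(p);
}

static void driver_unload(struct fake_pci_dev *p)
{
	if (p->msi_enabled)      /* only undo what was actually set up */
		fake_disable_msi(p);
}

int main(void)
{
	struct fake_pci_dev p = { .device = 0x2a02 };

	driver_load(&p);
	printf("msi after load: %d\n", p.msi_enabled);
	driver_unload(&p);
	printf("msi after unload: %d\n", p.msi_enabled);
	return 0;
}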
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index afb51a390e17..8daf0d84cd5a 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -105,6 +105,12 @@ typedef struct drm_i915_private {
 	wait_queue_head_t irq_queue;
 	atomic_t irq_received;
 	atomic_t irq_emitted;
+	/** Protects user_irq_refcount and irq_mask_reg */
+	spinlock_t user_irq_lock;
+	/** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
+	int user_irq_refcount;
+	/** Cached value of IMR to avoid reads in updating the bitfield */
+	u32 irq_mask_reg;
 
 	int tex_lru_log_granularity;
 	int allow_batchbuffer;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 4a2de7897344..24d11ed5bbc7 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -33,6 +33,31 @@
 
 #define MAX_NOPID ((u32)~0)
 
+/** These are the interrupts used by the driver */
+#define I915_INTERRUPT_ENABLE_MASK (I915_USER_INTERRUPT |		\
+				    I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | \
+				    I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT)
+
+static inline void
+i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
+{
+	if ((dev_priv->irq_mask_reg & mask) != 0) {
+		dev_priv->irq_mask_reg &= ~mask;
+		I915_WRITE(IMR, dev_priv->irq_mask_reg);
+		(void) I915_READ(IMR);
+	}
+}
+
+static inline void
+i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
+{
+	if ((dev_priv->irq_mask_reg & mask) != mask) {
+		dev_priv->irq_mask_reg |= mask;
+		I915_WRITE(IMR, dev_priv->irq_mask_reg);
+		(void) I915_READ(IMR);
+	}
+}
+
 /**
  * Emit blits for scheduled buffer swaps.
  *
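i915_enable_irq()/i915_disable_irq() are the classic shadow-register idiom: keep a cached copy of IMR so the driver never has to read the register to flip one bit, write the register only when the requested bits actually change, and read it back once to flush the posted write. A user-space model of just that bookkeeping; REG_WRITE/REG_READ and the struct are stand-ins, not the driver's macros:

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_imr;                      /* stands in for the IMR register */
#define REG_WRITE(v) (fake_imr = (v))          /* stand-in for I915_WRITE(IMR, v) */
#define REG_READ()   (fake_imr)                /* stand-in for the flushing read */

struct irq_state {
	uint32_t irq_mask_reg;                 /* shadow of IMR: 1 = masked */
};

static void enable_irq(struct irq_state *s, uint32_t mask)
{
	if ((s->irq_mask_reg & mask) != 0) {   /* at least one requested bit still masked */
		s->irq_mask_reg &= ~mask;
		REG_WRITE(s->irq_mask_reg);
		(void)REG_READ();              /* flush the posted write */
	}
}

static void disable_irq(struct irq_state *s, uint32_t mask)
{
	if ((s->irq_mask_reg & mask) != mask) { /* at least one requested bit still enabled */
		s->irq_mask_reg |= mask;
		REG_WRITE(s->irq_mask_reg);
		(void)REG_READ();
	}
}

int main(void)
{
	struct irq_state s = { .irq_mask_reg = ~0u };   /* start with everything masked */

	enable_irq(&s, 1u << 1);
	enable_irq(&s, 1u << 1);   /* second call is a no-op: no register write */
	disable_irq(&s, 1u << 1);
	printf("IMR shadow = %08x\n", s.irq_mask_reg);
	return 0;
}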
@@ -229,46 +254,50 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 {
 	struct drm_device *dev = (struct drm_device *) arg;
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	u16 temp;
 	u32 pipea_stats, pipeb_stats;
+	u32 iir;
 
 	pipea_stats = I915_READ(PIPEASTAT);
 	pipeb_stats = I915_READ(PIPEBSTAT);
 
-	temp = I915_READ16(IIR);
-
-	temp &= (I915_USER_INTERRUPT |
-		 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
-		 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT);
+	if (dev->pdev->msi_enabled)
+		I915_WRITE(IMR, ~0);
+	iir = I915_READ(IIR);
 
-	DRM_DEBUG("%s flag=%08x\n", __FUNCTION__, temp);
+	DRM_DEBUG("iir=%08x\n", iir);
 
-	if (temp == 0)
+	if (iir == 0) {
+		if (dev->pdev->msi_enabled) {
+			I915_WRITE(IMR, dev_priv->irq_mask_reg);
+			(void) I915_READ(IMR);
+		}
 		return IRQ_NONE;
+	}
 
-	I915_WRITE16(IIR, temp);
-	(void) I915_READ16(IIR);
-	DRM_READMEMORYBARRIER();
+	I915_WRITE(IIR, iir);
+	if (dev->pdev->msi_enabled)
+		I915_WRITE(IMR, dev_priv->irq_mask_reg);
+	(void) I915_READ(IIR); /* Flush posted writes */
 
 	dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
 
-	if (temp & I915_USER_INTERRUPT)
+	if (iir & I915_USER_INTERRUPT)
 		DRM_WAKEUP(&dev_priv->irq_queue);
 
-	if (temp & (I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
-		    I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT)) {
+	if (iir & (I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
+		   I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT)) {
 		int vblank_pipe = dev_priv->vblank_pipe;
 
 		if ((vblank_pipe &
 		     (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B))
 		    == (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B)) {
-			if (temp & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT)
+			if (iir & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT)
 				atomic_inc(&dev->vbl_received);
-			if (temp & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT)
+			if (iir & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT)
 				atomic_inc(&dev->vbl_received2);
-		} else if (((temp & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT) &&
+		} else if (((iir & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT) &&
 			    (vblank_pipe & DRM_I915_VBLANK_PIPE_A)) ||
-			   ((temp & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT) &&
+			   ((iir & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT) &&
 			    (vblank_pipe & DRM_I915_VBLANK_PIPE_B)))
 			atomic_inc(&dev->vbl_received);
 
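The msi_enabled branches in this handler follow the usual recipe for edge-like message-signaled interrupts: mask everything in IMR before sampling IIR and restore the shadow mask after acking, so that any IIR bit that became set while the handler was running can raise a fresh message instead of being lost. A compressed user-space model of that control flow, with plain variables standing in for the registers (nothing here is the real I915_READ/I915_WRITE):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Plain variables standing in for the IIR/IMR registers and the shadow mask. */
static uint32_t reg_iir = 0x2;          /* pretend one source is already pending */
static uint32_t reg_imr;
static uint32_t irq_mask_shadow;

static int handle_irq(bool msi_enabled)
{
	uint32_t iir;

	if (msi_enabled)
		reg_imr = ~0u;          /* mask everything before sampling IIR */

	iir = reg_iir;
	if (iir == 0) {
		if (msi_enabled)
			reg_imr = irq_mask_shadow;   /* restore mask before bailing out */
		return 0;               /* nothing for us: IRQ_NONE */
	}

	reg_iir &= ~iir;                /* ack the bits we are about to service */
	if (msi_enabled)
		reg_imr = irq_mask_shadow;   /* unmask: a bit set meanwhile re-raises an edge */

	printf("serviced iir=%#x, imr=%#x\n", iir, reg_imr);
	return 1;                       /* IRQ_HANDLED */
}

int main(void)
{
	irq_mask_shadow = ~0x2u;        /* only our one source is normally unmasked */
	return handle_irq(true) ? 0 : 1;
}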
@@ -314,6 +343,27 @@ static int i915_emit_irq(struct drm_device * dev)
 	return dev_priv->counter;
 }
 
+static void i915_user_irq_get(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+	spin_lock(&dev_priv->user_irq_lock);
+	if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1))
+		i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
+	spin_unlock(&dev_priv->user_irq_lock);
+}
+
+static void i915_user_irq_put(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+	spin_lock(&dev_priv->user_irq_lock);
+	BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
+	if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0))
+		i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
+	spin_unlock(&dev_priv->user_irq_lock);
+}
+
 static int i915_wait_irq(struct drm_device * dev, int irq_nr)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
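i915_user_irq_get()/put() implement a refcounted enable: the user interrupt is unmasked on the 0-to-1 transition and masked again on the 1-to-0 transition, with the lock covering both the count and the shadow mask. A self-contained model of that pattern using a pthread mutex in place of the spinlock; none of the names below are i915 API:

#include <pthread.h>
#include <assert.h>
#include <stdio.h>

#define USER_INTERRUPT (1u << 1)               /* placeholder bit */

static pthread_mutex_t user_irq_lock = PTHREAD_MUTEX_INITIALIZER;
static int user_irq_refcount;
static unsigned int hw_enabled_mask;           /* models the unmasked IMR bits */

static void user_irq_get(void)
{
	pthread_mutex_lock(&user_irq_lock);
	if (++user_irq_refcount == 1)          /* 0 -> 1: actually unmask in hardware */
		hw_enabled_mask |= USER_INTERRUPT;
	pthread_mutex_unlock(&user_irq_lock);
}

static void user_irq_put(void)
{
	pthread_mutex_lock(&user_irq_lock);
	assert(user_irq_refcount > 0);         /* mirrors the BUG_ON in the patch */
	if (--user_irq_refcount == 0)          /* 1 -> 0: mask it again */
		hw_enabled_mask &= ~USER_INTERRUPT;
	pthread_mutex_unlock(&user_irq_lock);
}

int main(void)
{
	user_irq_get();
	user_irq_get();
	user_irq_put();                        /* one holder left: stays enabled */
	printf("enabled mask = %#x\n", hw_enabled_mask);
	user_irq_put();
	printf("enabled mask = %#x\n", hw_enabled_mask);
	return 0;
}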
@@ -322,13 +372,17 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
 	DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
 		  READ_BREADCRUMB(dev_priv));
 
-	if (READ_BREADCRUMB(dev_priv) >= irq_nr)
+	if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
+		dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
 		return 0;
+	}
 
 	dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
 
+	i915_user_irq_get(dev);
 	DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
 		    READ_BREADCRUMB(dev_priv) >= irq_nr);
+	i915_user_irq_put(dev);
 
 	if (ret == -EBUSY) {
 		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
@@ -413,20 +467,6 @@ int i915_irq_wait(struct drm_device *dev, void *data,
 	return i915_wait_irq(dev, irqwait->irq_seq);
 }
 
-static void i915_enable_interrupt (struct drm_device *dev)
-{
-	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	u16 flag;
-
-	flag = 0;
-	if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A)
-		flag |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
-	if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B)
-		flag |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
-
-	I915_WRITE16(IER, I915_USER_INTERRUPT | flag);
-}
-
 /* Set the vblank monitor pipe
  */
 int i915_vblank_pipe_set(struct drm_device *dev, void *data,
432int i915_vblank_pipe_set(struct drm_device *dev, void *data, 472int i915_vblank_pipe_set(struct drm_device *dev, void *data,
@@ -434,6 +474,7 @@ int i915_vblank_pipe_set(struct drm_device *dev, void *data,
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	drm_i915_vblank_pipe_t *pipe = data;
+	u32 enable_mask = 0, disable_mask = 0;
 
 	if (!dev_priv) {
 		DRM_ERROR("called with no initialization\n");
@@ -445,9 +486,20 @@ int i915_vblank_pipe_set(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 
-	dev_priv->vblank_pipe = pipe->pipe;
+	if (pipe->pipe & DRM_I915_VBLANK_PIPE_A)
+		enable_mask |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
+	else
+		disable_mask |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
+
+	if (pipe->pipe & DRM_I915_VBLANK_PIPE_B)
+		enable_mask |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
+	else
+		disable_mask |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
 
-	i915_enable_interrupt (dev);
+	i915_enable_irq(dev_priv, enable_mask);
+	i915_disable_irq(dev_priv, disable_mask);
+
+	dev_priv->vblank_pipe = pipe->pipe;
 
 	return 0;
 }
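The rewritten pipe-set path derives two masks from the request, so deselecting a pipe now masks its vblank interrupt again instead of leaving it enabled, and only then records the new selection. The same arithmetic in isolation, with placeholder bit values rather than the hardware ones:

#include <stdio.h>

#define SEL_PIPE_A      (1 << 0)    /* selection flags, analogous to DRM_I915_VBLANK_PIPE_A/B */
#define SEL_PIPE_B      (1 << 1)
#define PIPE_A_VBLANK   (1u << 2)   /* placeholder interrupt bits */
#define PIPE_B_VBLANK   (1u << 4)

static void compute_masks(unsigned int pipes,
			  unsigned int *enable, unsigned int *disable)
{
	*enable = 0;
	*disable = 0;

	if (pipes & SEL_PIPE_A)
		*enable |= PIPE_A_VBLANK;
	else
		*disable |= PIPE_A_VBLANK;

	if (pipes & SEL_PIPE_B)
		*enable |= PIPE_B_VBLANK;
	else
		*disable |= PIPE_B_VBLANK;
}

int main(void)
{
	unsigned int en, dis;

	compute_masks(SEL_PIPE_A, &en, &dis);     /* pipe A on, pipe B off */
	printf("enable=%#x disable=%#x\n", en, dis);
	return 0;
}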
@@ -464,7 +516,7 @@ int i915_vblank_pipe_get(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 
-	flag = I915_READ(IER);
+	flag = I915_READ(IMR);
 	pipe->pipe = 0;
 	if (flag & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT)
 		pipe->pipe |= DRM_I915_VBLANK_PIPE_A;
@@ -586,9 +638,9 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 
-	I915_WRITE16(HWSTAM, 0xfffe);
-	I915_WRITE16(IMR, 0x0);
-	I915_WRITE16(IER, 0x0);
+	I915_WRITE(HWSTAM, 0xfffe);
+	I915_WRITE(IMR, 0x0);
+	I915_WRITE(IER, 0x0);
 }
 
 void i915_driver_irq_postinstall(struct drm_device * dev)
@@ -601,7 +653,18 @@ void i915_driver_irq_postinstall(struct drm_device * dev)
 
 	if (!dev_priv->vblank_pipe)
 		dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A;
-	i915_enable_interrupt(dev);
+
+	/* Set initial unmasked IRQs to just the selected vblank pipes. */
+	dev_priv->irq_mask_reg = ~0;
+	if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A)
+		dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
+	if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B)
+		dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
+
+	I915_WRITE(IMR, dev_priv->irq_mask_reg);
+	I915_WRITE(IER, I915_INTERRUPT_ENABLE_MASK);
+	(void) I915_READ(IER);
+
 	DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
 }
 
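Postinstall now seeds the shadow mask as "everything masked", clears the bits for the selected vblank pipes, and programs IMR from that while IER gets the full I915_INTERRUPT_ENABLE_MASK, so the user interrupt is wired up in IER but stays masked in IMR until i915_user_irq_get() is called. A tiny model of that initial mask computation, again with placeholder bit values:

#include <stdint.h>
#include <stdio.h>

#define USER_INTERRUPT  (1u << 1)   /* placeholder bit positions */
#define PIPE_A_VBLANK   (1u << 2)
#define PIPE_B_VBLANK   (1u << 4)
#define ENABLE_MASK     (USER_INTERRUPT | PIPE_A_VBLANK | PIPE_B_VBLANK)

#define SEL_PIPE_A      (1 << 0)    /* selection flags from userspace */
#define SEL_PIPE_B      (1 << 1)

int main(void)
{
	unsigned int vblank_pipe = SEL_PIPE_A;   /* the default selection in the patch */
	uint32_t imr = ~0u;                      /* start with every source masked */

	if (vblank_pipe & SEL_PIPE_A)
		imr &= ~PIPE_A_VBLANK;
	if (vblank_pipe & SEL_PIPE_B)
		imr &= ~PIPE_B_VBLANK;

	/* IER enables the sources the driver ever uses; IMR gates them. */
	printf("IER=%#x IMR=%#x -> live=%#x\n",
	       ENABLE_MASK, imr, ENABLE_MASK & ~imr);
	return 0;
}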
@@ -613,10 +676,10 @@ void i915_driver_irq_uninstall(struct drm_device * dev)
 	if (!dev_priv)
 		return;
 
-	I915_WRITE16(HWSTAM, 0xffff);
-	I915_WRITE16(IMR, 0xffff);
-	I915_WRITE16(IER, 0x0);
+	I915_WRITE(HWSTAM, 0xffff);
+	I915_WRITE(IMR, 0xffff);
+	I915_WRITE(IER, 0x0);
 
-	temp = I915_READ16(IIR);
-	I915_WRITE16(IIR, temp);
+	temp = I915_READ(IIR);
+	I915_WRITE(IIR, temp);
 }