Diffstat (limited to 'drivers/gpu/drm/i915/i915_irq.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 1043
1 file changed, 859 insertions(+), 184 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 4b91228fd9bd..5d1dedc02f15 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -30,6 +30,7 @@
 
 #include <linux/sysrq.h>
 #include <linux/slab.h>
+#include <linux/circ_buf.h>
 #include <drm/drmP.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
@@ -269,6 +270,21 @@ static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
 	}
 }
 
+static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
+						  enum pipe pipe, bool enable)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	assert_spin_locked(&dev_priv->irq_lock);
+
+	if (enable)
+		dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN;
+	else
+		dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN;
+	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
+	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
+}
+
 /**
  * ibx_display_interrupt_update - update SDEIMR
  * @dev_priv: driver private
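broadwell_set_fifo_underrun_reporting above introduces the gen8 pattern of updating the cached de_irq_mask[] copy under irq_lock and only then pushing it to the per-pipe IMR; the gen8 vblank enable/disable hooks later in this patch repeat it. A sketch of the shared idiom as a hypothetical consolidated helper (the patch itself open-codes it at each site):

/*
 * Sketch only: set or clear one bit in the cached display-engine IRQ
 * mask for a pipe, then write the result to IMR and post the write so
 * it reaches the hardware before the lock is dropped.
 */
static void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
				enum pipe pipe, uint32_t bit, bool enable)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (enable)
		dev_priv->de_irq_mask[pipe] &= ~bit;
	else
		dev_priv->de_irq_mask[pipe] |= bit;
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
}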
@@ -381,6 +397,8 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
 		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
 	else if (IS_GEN7(dev))
 		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);
+	else if (IS_GEN8(dev))
+		broadwell_set_fifo_underrun_reporting(dev, pipe, enable);
 
 done:
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
@@ -441,7 +459,7 @@ done:
 
 
 void
-i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
+i915_enable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask)
 {
 	u32 reg = PIPESTAT(pipe);
 	u32 pipestat = I915_READ(reg) & 0x7fff0000;
@@ -458,7 +476,7 @@ i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
 }
 
 void
-i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
+i915_disable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask)
 {
 	u32 reg = PIPESTAT(pipe);
 	u32 pipestat = I915_READ(reg) & 0x7fff0000;
@@ -486,9 +504,10 @@ static void i915_enable_asle_pipestat(struct drm_device *dev)
 
 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 
-	i915_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE);
+	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_ENABLE);
 	if (INTEL_INFO(dev)->gen >= 4)
-		i915_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE);
+		i915_enable_pipestat(dev_priv, PIPE_A,
+				     PIPE_LEGACY_BLC_EVENT_ENABLE);
 
 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
@@ -518,6 +537,12 @@ i915_pipe_enabled(struct drm_device *dev, int pipe)
 	}
 }
 
+static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
+{
+	/* Gen2 doesn't have a hardware frame counter */
+	return 0;
+}
+
 /* Called from drm generic code, passed a 'crtc', which
  * we use as a pipe index
  */
@@ -526,7 +551,7 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	unsigned long high_frame;
 	unsigned long low_frame;
-	u32 high1, high2, low;
+	u32 high1, high2, low, pixel, vbl_start;
 
 	if (!i915_pipe_enabled(dev, pipe)) {
 		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
@@ -534,6 +559,24 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
 		return 0;
 	}
 
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		struct intel_crtc *intel_crtc =
+			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+		const struct drm_display_mode *mode =
+			&intel_crtc->config.adjusted_mode;
+
+		vbl_start = mode->crtc_vblank_start * mode->crtc_htotal;
+	} else {
+		enum transcoder cpu_transcoder =
+			intel_pipe_to_cpu_transcoder(dev_priv, pipe);
+		u32 htotal;
+
+		htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
+		vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;
+
+		vbl_start *= htotal;
+	}
+
 	high_frame = PIPEFRAME(pipe);
 	low_frame = PIPEFRAMEPIXEL(pipe);
 
@@ -544,13 +587,20 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
 	 */
 	do {
 		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
-		low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
+		low = I915_READ(low_frame);
 		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
 	} while (high1 != high2);
 
 	high1 >>= PIPE_FRAME_HIGH_SHIFT;
+	pixel = low & PIPE_PIXEL_MASK;
 	low >>= PIPE_FRAME_LOW_SHIFT;
-	return (high1 << 8) | low;
+
+	/*
+	 * The frame counter increments at beginning of active.
+	 * Cook up a vblank counter by also checking the pixel
+	 * counter against vblank start.
+	 */
+	return ((high1 << 8) | low) + (pixel >= vbl_start);
 }
 
 static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
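The comment in the hunk above is the whole trick: the hardware frame counter only increments at the start of active video, so during the vblank leading into frame N+1 it still reads N. Comparing the pixel counter against the precomputed vbl_start corrects for that. A sketch as a standalone, hypothetical helper:

/*
 * Sketch only: hw_frame is the raw start-of-active frame counter,
 * pixel the current pixel count within the frame. Once the pixel
 * counter has passed vblank start, the vblank for the next frame has
 * begun but the hardware counter has not incremented yet, so add 1.
 */
static u32 cook_vblank_counter(u32 hw_frame, u32 pixel, u32 vbl_start)
{
	return hw_frame + (pixel >= vbl_start);
}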
@@ -567,66 +617,163 @@ static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
 	return I915_READ(reg);
 }
 
+/* raw reads, only for fast reads of display block, no need for forcewake etc. */
+#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
+#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
+
+static bool intel_pipe_in_vblank_locked(struct drm_device *dev, enum pipe pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t status;
+	int reg;
+
+	if (IS_VALLEYVIEW(dev)) {
+		status = pipe == PIPE_A ?
+			I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
+			I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
+
+		reg = VLV_ISR;
+	} else if (IS_GEN2(dev)) {
+		status = pipe == PIPE_A ?
+			I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
+			I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
+
+		reg = ISR;
+	} else if (INTEL_INFO(dev)->gen < 5) {
+		status = pipe == PIPE_A ?
+			I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
+			I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
+
+		reg = ISR;
+	} else if (INTEL_INFO(dev)->gen < 7) {
+		status = pipe == PIPE_A ?
+			DE_PIPEA_VBLANK :
+			DE_PIPEB_VBLANK;
+
+		reg = DEISR;
+	} else {
+		switch (pipe) {
+		default:
+		case PIPE_A:
+			status = DE_PIPEA_VBLANK_IVB;
+			break;
+		case PIPE_B:
+			status = DE_PIPEB_VBLANK_IVB;
+			break;
+		case PIPE_C:
+			status = DE_PIPEC_VBLANK_IVB;
+			break;
+		}
+
+		reg = DEISR;
+	}
+
+	if (IS_GEN2(dev))
+		return __raw_i915_read16(dev_priv, reg) & status;
+	else
+		return __raw_i915_read32(dev_priv, reg) & status;
+}
+
 static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
-				    int *vpos, int *hpos)
+				    int *vpos, int *hpos, ktime_t *stime, ktime_t *etime)
 {
-	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	u32 vbl = 0, position = 0;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
+	int position;
 	int vbl_start, vbl_end, htotal, vtotal;
 	bool in_vbl = true;
 	int ret = 0;
-	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
-								      pipe);
+	unsigned long irqflags;
 
-	if (!i915_pipe_enabled(dev, pipe)) {
+	if (!intel_crtc->active) {
 		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
 				 "pipe %c\n", pipe_name(pipe));
 		return 0;
 	}
 
-	/* Get vtotal. */
-	vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
+	htotal = mode->crtc_htotal;
+	vtotal = mode->crtc_vtotal;
+	vbl_start = mode->crtc_vblank_start;
+	vbl_end = mode->crtc_vblank_end;
 
-	if (INTEL_INFO(dev)->gen >= 4) {
+	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
+
+	/*
+	 * Lock uncore.lock, as we will do multiple timing critical raw
+	 * register reads, potentially with preemption disabled, so the
+	 * following code must not block on uncore.lock.
+	 */
+	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
+
+	/* Get optional system timestamp before query. */
+	if (stime)
+		*stime = ktime_get();
+
+	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
 		/* No obvious pixelcount register. Only query vertical
 		 * scanout position from Display scan line register.
 		 */
-		position = I915_READ(PIPEDSL(pipe));
+		if (IS_GEN2(dev))
+			position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
+		else
+			position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
 
-		/* Decode into vertical scanout position. Don't have
-		 * horizontal scanout position.
+		/*
+		 * The scanline counter increments at the leading edge
+		 * of hsync, ie. it completely misses the active portion
+		 * of the line. Fix up the counter at both edges of vblank
+		 * to get a more accurate picture whether we're in vblank
+		 * or not.
 		 */
-		*vpos = position & 0x1fff;
-		*hpos = 0;
+		in_vbl = intel_pipe_in_vblank_locked(dev, pipe);
+		if ((in_vbl && position == vbl_start - 1) ||
+		    (!in_vbl && position == vbl_end - 1))
+			position = (position + 1) % vtotal;
 	} else {
 		/* Have access to pixelcount since start of frame.
 		 * We can split this into vertical and horizontal
 		 * scanout position.
 		 */
-		position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
+		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
 
-		htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
-		*vpos = position / htotal;
-		*hpos = position - (*vpos * htotal);
+		/* convert to pixel counts */
+		vbl_start *= htotal;
+		vbl_end *= htotal;
+		vtotal *= htotal;
 	}
 
-	/* Query vblank area. */
-	vbl = I915_READ(VBLANK(cpu_transcoder));
+	/* Get optional system timestamp after query. */
+	if (etime)
+		*etime = ktime_get();
 
-	/* Test position against vblank region. */
-	vbl_start = vbl & 0x1fff;
-	vbl_end = (vbl >> 16) & 0x1fff;
+	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
 
-	if ((*vpos < vbl_start) || (*vpos > vbl_end))
-		in_vbl = false;
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 
-	/* Inside "upper part" of vblank area? Apply corrective offset: */
-	if (in_vbl && (*vpos >= vbl_start))
-		*vpos = *vpos - vtotal;
+	in_vbl = position >= vbl_start && position < vbl_end;
 
-	/* Readouts valid? */
-	if (vbl > 0)
-		ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
+	/*
+	 * While in vblank, position will be negative
+	 * counting up towards 0 at vbl_end. And outside
+	 * vblank, position will be positive counting
+	 * up since vbl_end.
+	 */
+	if (position >= vbl_start)
+		position -= vbl_end;
+	else
+		position += vtotal - vbl_end;
+
+	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+		*vpos = position;
+		*hpos = 0;
+	} else {
+		*vpos = position / htotal;
+		*hpos = position - (*vpos * htotal);
+	}
 
 	/* In vblank? */
 	if (in_vbl)
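The folding near the end of the hunk is easiest to see in isolation: positions are rebased so the value is negative inside vblank, counts up to zero exactly at vbl_end, and keeps counting up through the active region. A sketch mirroring that arithmetic (hypothetical helper, example numbers invented):

/* Sketch only: rebase a raw scan position so vblank is negative and
 * zero falls exactly on vbl_end. E.g. with vtotal = 806, vbl_start =
 * 768, vbl_end = 806: raw 770 -> -36 (in vblank), raw 10 -> 10. */
static int fold_scanout_pos(int position, int vbl_start, int vbl_end,
			    int vtotal)
{
	if (position >= vbl_start)
		return position - vbl_end;
	return position + vtotal - vbl_end;
}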
@@ -665,7 +812,8 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
 						     crtc);
 }
 
-static int intel_hpd_irq_event(struct drm_device *dev, struct drm_connector *connector)
+static bool intel_hpd_irq_event(struct drm_device *dev,
+				struct drm_connector *connector)
 {
 	enum drm_connector_status old_status;
 
@@ -673,11 +821,16 @@ static int intel_hpd_irq_event(struct drm_device *dev, struct drm_connector *con
 	old_status = connector->status;
 
 	connector->status = connector->funcs->detect(connector, false);
-	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
+	if (old_status == connector->status)
+		return false;
+
+	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
 		      connector->base.id,
 		      drm_get_connector_name(connector),
-		      old_status, connector->status);
-	return (old_status != connector->status);
+		      drm_get_connector_status_name(old_status),
+		      drm_get_connector_status_name(connector->status));
+
+	return true;
 }
 
 /*
@@ -801,7 +954,7 @@ static void notify_ring(struct drm_device *dev,
 	if (ring->obj == NULL)
 		return;
 
-	trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));
+	trace_i915_gem_request_complete(ring);
 
 	wake_up_all(&ring->irq_queue);
 	i915_queue_hangcheck(dev);
@@ -812,7 +965,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
 	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
 						    rps.work);
 	u32 pm_iir;
-	u8 new_delay;
+	int new_delay, adj;
 
 	spin_lock_irq(&dev_priv->irq_lock);
 	pm_iir = dev_priv->rps.pm_iir;
@@ -829,40 +982,49 @@ static void gen6_pm_rps_work(struct work_struct *work)
 
 	mutex_lock(&dev_priv->rps.hw_lock);
 
+	adj = dev_priv->rps.last_adj;
 	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
-		new_delay = dev_priv->rps.cur_delay + 1;
+		if (adj > 0)
+			adj *= 2;
+		else
+			adj = 1;
+		new_delay = dev_priv->rps.cur_delay + adj;
 
 		/*
 		 * For better performance, jump directly
 		 * to RPe if we're below it.
 		 */
-		if (IS_VALLEYVIEW(dev_priv->dev) &&
-		    dev_priv->rps.cur_delay < dev_priv->rps.rpe_delay)
+		if (new_delay < dev_priv->rps.rpe_delay)
+			new_delay = dev_priv->rps.rpe_delay;
+	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
+		if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay)
 			new_delay = dev_priv->rps.rpe_delay;
-	} else
-		new_delay = dev_priv->rps.cur_delay - 1;
+		else
+			new_delay = dev_priv->rps.min_delay;
+		adj = 0;
+	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
+		if (adj < 0)
+			adj *= 2;
+		else
+			adj = -1;
+		new_delay = dev_priv->rps.cur_delay + adj;
+	} else { /* unknown event */
+		new_delay = dev_priv->rps.cur_delay;
+	}
 
 	/* sysfs frequency interfaces may have snuck in while servicing the
 	 * interrupt
 	 */
-	if (new_delay >= dev_priv->rps.min_delay &&
-	    new_delay <= dev_priv->rps.max_delay) {
-		if (IS_VALLEYVIEW(dev_priv->dev))
-			valleyview_set_rps(dev_priv->dev, new_delay);
-		else
-			gen6_set_rps(dev_priv->dev, new_delay);
-	}
-
-	if (IS_VALLEYVIEW(dev_priv->dev)) {
-		/*
-		 * On VLV, when we enter RC6 we may not be at the minimum
-		 * voltage level, so arm a timer to check. It should only
-		 * fire when there's activity or once after we've entered
-		 * RC6, and then won't be re-armed until the next RPS interrupt.
-		 */
-		mod_delayed_work(dev_priv->wq, &dev_priv->rps.vlv_work,
-				 msecs_to_jiffies(100));
-	}
+	if (new_delay < (int)dev_priv->rps.min_delay)
+		new_delay = dev_priv->rps.min_delay;
+	if (new_delay > (int)dev_priv->rps.max_delay)
+		new_delay = dev_priv->rps.max_delay;
+	dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_delay;
+
+	if (IS_VALLEYVIEW(dev_priv->dev))
+		valleyview_set_rps(dev_priv->dev, new_delay);
+	else
+		gen6_set_rps(dev_priv->dev, new_delay);
 
 	mutex_unlock(&dev_priv->rps.hw_lock);
 }
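The rewritten gen6_pm_rps_work replaces the old fixed one-step frequency change with an adjustment that doubles while consecutive interrupts agree on direction and restarts at a single step when they do not (a down-timeout forces it back to zero). The policy in isolation, as a hypothetical helper:

/* Sketch only: exponential step while the direction is sustained,
 * reset to a single step on a direction change. */
static int rps_next_adj(int last_adj, bool up_event)
{
	if (up_event)
		return last_adj > 0 ? last_adj * 2 : 1;
	else
		return last_adj < 0 ? last_adj * 2 : -1;
}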
@@ -882,9 +1044,10 @@ static void ivybridge_parity_work(struct work_struct *work)
 	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
 						    l3_parity.error_work);
 	u32 error_status, row, bank, subbank;
-	char *parity_event[5];
+	char *parity_event[6];
 	uint32_t misccpctl;
 	unsigned long flags;
+	uint8_t slice = 0;
 
 	/* We must turn off DOP level clock gating to access the L3 registers.
 	 * In order to prevent a get/put style interface, acquire struct mutex
@@ -892,55 +1055,81 @@ static void ivybridge_parity_work(struct work_struct *work)
 	 */
 	mutex_lock(&dev_priv->dev->struct_mutex);
 
+	/* If we've screwed up tracking, just let the interrupt fire again */
+	if (WARN_ON(!dev_priv->l3_parity.which_slice))
+		goto out;
+
 	misccpctl = I915_READ(GEN7_MISCCPCTL);
 	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
 	POSTING_READ(GEN7_MISCCPCTL);
 
-	error_status = I915_READ(GEN7_L3CDERRST1);
-	row = GEN7_PARITY_ERROR_ROW(error_status);
-	bank = GEN7_PARITY_ERROR_BANK(error_status);
-	subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
+	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
+		u32 reg;
 
-	I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
-				    GEN7_L3CDERRST1_ENABLE);
-	POSTING_READ(GEN7_L3CDERRST1);
+		slice--;
+		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
+			break;
 
-	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
+		dev_priv->l3_parity.which_slice &= ~(1<<slice);
 
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	ilk_enable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+		reg = GEN7_L3CDERRST1 + (slice * 0x200);
 
-	mutex_unlock(&dev_priv->dev->struct_mutex);
+		error_status = I915_READ(reg);
+		row = GEN7_PARITY_ERROR_ROW(error_status);
+		bank = GEN7_PARITY_ERROR_BANK(error_status);
+		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
 
-	parity_event[0] = I915_L3_PARITY_UEVENT "=1";
-	parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
-	parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
-	parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
-	parity_event[4] = NULL;
+		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
+		POSTING_READ(reg);
+
+		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
+		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
+		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
+		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
+		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
+		parity_event[5] = NULL;
+
+		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
+				   KOBJ_CHANGE, parity_event);
+
+		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
+			  slice, row, bank, subbank);
+
+		kfree(parity_event[4]);
+		kfree(parity_event[3]);
+		kfree(parity_event[2]);
+		kfree(parity_event[1]);
+	}
 
-	kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
-			   KOBJ_CHANGE, parity_event);
+	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
 
-	DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
-		  row, bank, subbank);
+out:
+	WARN_ON(dev_priv->l3_parity.which_slice);
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
+	ilk_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
-	kfree(parity_event[3]);
-	kfree(parity_event[2]);
-	kfree(parity_event[1]);
+	mutex_unlock(&dev_priv->dev->struct_mutex);
 }
 
-static void ivybridge_parity_error_irq_handler(struct drm_device *dev)
+static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 
-	if (!HAS_L3_GPU_CACHE(dev))
+	if (!HAS_L3_DPF(dev))
 		return;
 
 	spin_lock(&dev_priv->irq_lock);
-	ilk_disable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
+	ilk_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
 	spin_unlock(&dev_priv->irq_lock);
 
+	iir &= GT_PARITY_ERROR(dev);
+	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
+		dev_priv->l3_parity.which_slice |= 1 << 1;
+
+	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
+		dev_priv->l3_parity.which_slice |= 1 << 0;
+
 	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
 }
 
@@ -975,8 +1164,58 @@ static void snb_gt_irq_handler(struct drm_device *dev,
 		i915_handle_error(dev, false);
 	}
 
-	if (gt_iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
-		ivybridge_parity_error_irq_handler(dev);
+	if (gt_iir & GT_PARITY_ERROR(dev))
+		ivybridge_parity_error_irq_handler(dev, gt_iir);
+}
+
+static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
+				       struct drm_i915_private *dev_priv,
+				       u32 master_ctl)
+{
+	u32 rcs, bcs, vcs;
+	uint32_t tmp = 0;
+	irqreturn_t ret = IRQ_NONE;
+
+	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
+		tmp = I915_READ(GEN8_GT_IIR(0));
+		if (tmp) {
+			ret = IRQ_HANDLED;
+			rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
+			bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
+			if (rcs & GT_RENDER_USER_INTERRUPT)
+				notify_ring(dev, &dev_priv->ring[RCS]);
+			if (bcs & GT_RENDER_USER_INTERRUPT)
+				notify_ring(dev, &dev_priv->ring[BCS]);
+			I915_WRITE(GEN8_GT_IIR(0), tmp);
+		} else
+			DRM_ERROR("The master control interrupt lied (GT0)!\n");
+	}
+
+	if (master_ctl & GEN8_GT_VCS1_IRQ) {
+		tmp = I915_READ(GEN8_GT_IIR(1));
+		if (tmp) {
+			ret = IRQ_HANDLED;
+			vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
+			if (vcs & GT_RENDER_USER_INTERRUPT)
+				notify_ring(dev, &dev_priv->ring[VCS]);
+			I915_WRITE(GEN8_GT_IIR(1), tmp);
+		} else
+			DRM_ERROR("The master control interrupt lied (GT1)!\n");
+	}
+
+	if (master_ctl & GEN8_GT_VECS_IRQ) {
+		tmp = I915_READ(GEN8_GT_IIR(3));
+		if (tmp) {
+			ret = IRQ_HANDLED;
+			vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
+			if (vcs & GT_RENDER_USER_INTERRUPT)
+				notify_ring(dev, &dev_priv->ring[VECS]);
+			I915_WRITE(GEN8_GT_IIR(3), tmp);
+		} else
+			DRM_ERROR("The master control interrupt lied (GT3)!\n");
+	}
+
+	return ret;
 }
 
 #define HPD_STORM_DETECT_PERIOD 1000
@@ -1050,6 +1289,102 @@ static void dp_aux_irq_handler(struct drm_device *dev)
 	wake_up_all(&dev_priv->gmbus_wait_queue);
 }
 
+#if defined(CONFIG_DEBUG_FS)
+static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
+					 uint32_t crc0, uint32_t crc1,
+					 uint32_t crc2, uint32_t crc3,
+					 uint32_t crc4)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
+	struct intel_pipe_crc_entry *entry;
+	int head, tail;
+
+	spin_lock(&pipe_crc->lock);
+
+	if (!pipe_crc->entries) {
+		spin_unlock(&pipe_crc->lock);
+		DRM_ERROR("spurious interrupt\n");
+		return;
+	}
+
+	head = pipe_crc->head;
+	tail = pipe_crc->tail;
+
+	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
+		spin_unlock(&pipe_crc->lock);
+		DRM_ERROR("CRC buffer overflowing\n");
+		return;
+	}
+
+	entry = &pipe_crc->entries[head];
+
+	entry->frame = dev->driver->get_vblank_counter(dev, pipe);
+	entry->crc[0] = crc0;
+	entry->crc[1] = crc1;
+	entry->crc[2] = crc2;
+	entry->crc[3] = crc3;
+	entry->crc[4] = crc4;
+
+	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
+	pipe_crc->head = head;
+
+	spin_unlock(&pipe_crc->lock);
+
+	wake_up_interruptible(&pipe_crc->wq);
+}
+#else
+static inline void
+display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
+			     uint32_t crc0, uint32_t crc1,
+			     uint32_t crc2, uint32_t crc3,
+			     uint32_t crc4) {}
+#endif
+
+
+static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	display_pipe_crc_irq_handler(dev, pipe,
+				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
+				     0, 0, 0, 0);
+}
+
+static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	display_pipe_crc_irq_handler(dev, pipe,
+				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
+				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
+				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
+				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
+				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
+}
+
+static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t res1, res2;
+
+	if (INTEL_INFO(dev)->gen >= 3)
+		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
+	else
+		res1 = 0;
+
+	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
+		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
+	else
+		res2 = 0;
+
+	display_pipe_crc_irq_handler(dev, pipe,
+				     I915_READ(PIPE_CRC_RES_RED(pipe)),
+				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
+				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
+				     res1, res2);
+}
+
 /* The RPS events need forcewake, so we add them to a work queue and mask their
  * IMR bits until the work is done. Other interrupts can be processed without
  * the work queue. */
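display_pipe_crc_irq_handler is also why <linux/circ_buf.h> is now included at the top of the file: CRC entries are produced from the interrupt handler into a single-producer ring indexed by head/tail. A sketch of the producer step, assuming (as CIRC_SPACE-style masking requires) that INTEL_PIPE_CRC_ENTRIES_NR is a power of two; fill_entry() is a hypothetical stand-in for the field assignments above:

/* Sketch only: write one slot if CIRC_SPACE() reports room, then
 * advance head with a power-of-two mask. The debugfs consumer
 * advances tail the same way under the same spinlock. */
if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) >= 1) {
	fill_entry(&entries[head]);
	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
}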
@@ -1117,13 +1452,16 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
 	for_each_pipe(pipe) {
-		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
+		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
 			drm_handle_vblank(dev, pipe);
 
 		if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
 			intel_prepare_page_flip(dev, pipe);
 			intel_finish_page_flip(dev, pipe);
 		}
+
+		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
+			i9xx_pipe_crc_irq_handler(dev, pipe);
 	}
 
 	/* Consume port. Then clear IIR or we'll miss events */
@@ -1212,21 +1550,26 @@ static void ivb_err_int_handler(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 err_int = I915_READ(GEN7_ERR_INT);
+	enum pipe pipe;
 
 	if (err_int & ERR_INT_POISON)
 		DRM_ERROR("Poison interrupt\n");
 
-	if (err_int & ERR_INT_FIFO_UNDERRUN_A)
-		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
-			DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");
-
-	if (err_int & ERR_INT_FIFO_UNDERRUN_B)
-		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
-			DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");
+	for_each_pipe(pipe) {
+		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) {
+			if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
+								  false))
+				DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
+						 pipe_name(pipe));
+		}
 
-	if (err_int & ERR_INT_FIFO_UNDERRUN_C)
-		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_C, false))
-			DRM_DEBUG_DRIVER("Pipe C FIFO underrun\n");
+		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
+			if (IS_IVYBRIDGE(dev))
+				ivb_pipe_crc_irq_handler(dev, pipe);
+			else
+				hsw_pipe_crc_irq_handler(dev, pipe);
+		}
+	}
 
 	I915_WRITE(GEN7_ERR_INT, err_int);
 }
@@ -1297,6 +1640,7 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
 static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum pipe pipe;
 
 	if (de_iir & DE_AUX_CHANNEL_A)
 		dp_aux_irq_handler(dev);
@@ -1304,31 +1648,26 @@ static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
 	if (de_iir & DE_GSE)
 		intel_opregion_asle_intr(dev);
 
-	if (de_iir & DE_PIPEA_VBLANK)
-		drm_handle_vblank(dev, 0);
-
-	if (de_iir & DE_PIPEB_VBLANK)
-		drm_handle_vblank(dev, 1);
-
 	if (de_iir & DE_POISON)
 		DRM_ERROR("Poison interrupt\n");
 
-	if (de_iir & DE_PIPEA_FIFO_UNDERRUN)
-		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
-			DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");
+	for_each_pipe(pipe) {
+		if (de_iir & DE_PIPE_VBLANK(pipe))
+			drm_handle_vblank(dev, pipe);
 
-	if (de_iir & DE_PIPEB_FIFO_UNDERRUN)
-		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
-			DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");
+		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
+			if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
+				DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
+						 pipe_name(pipe));
 
-	if (de_iir & DE_PLANEA_FLIP_DONE) {
-		intel_prepare_page_flip(dev, 0);
-		intel_finish_page_flip_plane(dev, 0);
-	}
+		if (de_iir & DE_PIPE_CRC_DONE(pipe))
+			i9xx_pipe_crc_irq_handler(dev, pipe);
 
-	if (de_iir & DE_PLANEB_FLIP_DONE) {
-		intel_prepare_page_flip(dev, 1);
-		intel_finish_page_flip_plane(dev, 1);
+		/* plane/pipes map 1:1 on ilk+ */
+		if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
+			intel_prepare_page_flip(dev, pipe);
+			intel_finish_page_flip_plane(dev, pipe);
+		}
 	}
 
 	/* check event from PCH */
@@ -1351,7 +1690,7 @@ static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
 static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int i;
+	enum pipe i;
 
 	if (de_iir & DE_ERR_INT_IVB)
 		ivb_err_int_handler(dev);
@@ -1362,10 +1701,12 @@ static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
 	if (de_iir & DE_GSE_IVB)
 		intel_opregion_asle_intr(dev);
 
-	for (i = 0; i < 3; i++) {
-		if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
+	for_each_pipe(i) {
+		if (de_iir & (DE_PIPE_VBLANK_IVB(i)))
 			drm_handle_vblank(dev, i);
-		if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
+
+		/* plane/pipes map 1:1 on ilk+ */
+		if (de_iir & DE_PLANE_FLIP_DONE_IVB(i)) {
 			intel_prepare_page_flip(dev, i);
 			intel_finish_page_flip_plane(dev, i);
 		}
@@ -1388,7 +1729,6 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
 	irqreturn_t ret = IRQ_NONE;
-	bool err_int_reenable = false;
 
 	atomic_inc(&dev_priv->irq_received);
 
@@ -1412,17 +1752,6 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
 		POSTING_READ(SDEIER);
 	}
 
-	/* On Haswell, also mask ERR_INT because we don't want to risk
-	 * generating "unclaimed register" interrupts from inside the interrupt
-	 * handler. */
-	if (IS_HASWELL(dev)) {
-		spin_lock(&dev_priv->irq_lock);
-		err_int_reenable = ~dev_priv->irq_mask & DE_ERR_INT_IVB;
-		if (err_int_reenable)
-			ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
-		spin_unlock(&dev_priv->irq_lock);
-	}
-
 	gt_iir = I915_READ(GTIIR);
 	if (gt_iir) {
 		if (INTEL_INFO(dev)->gen >= 6)
@@ -1452,13 +1781,6 @@
 		}
 	}
 
-	if (err_int_reenable) {
-		spin_lock(&dev_priv->irq_lock);
-		if (ivb_can_enable_err_int(dev))
-			ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
-		spin_unlock(&dev_priv->irq_lock);
-	}
-
 	I915_WRITE(DEIER, de_ier);
 	POSTING_READ(DEIER);
 	if (!HAS_PCH_NOP(dev)) {
@@ -1469,6 +1791,117 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
 	return ret;
 }
 
+static irqreturn_t gen8_irq_handler(int irq, void *arg)
+{
+	struct drm_device *dev = arg;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 master_ctl;
+	irqreturn_t ret = IRQ_NONE;
+	uint32_t tmp = 0;
+	enum pipe pipe;
+
+	atomic_inc(&dev_priv->irq_received);
+
+	master_ctl = I915_READ(GEN8_MASTER_IRQ);
+	master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
+	if (!master_ctl)
+		return IRQ_NONE;
+
+	I915_WRITE(GEN8_MASTER_IRQ, 0);
+	POSTING_READ(GEN8_MASTER_IRQ);
+
+	ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl);
+
+	if (master_ctl & GEN8_DE_MISC_IRQ) {
+		tmp = I915_READ(GEN8_DE_MISC_IIR);
+		if (tmp & GEN8_DE_MISC_GSE)
+			intel_opregion_asle_intr(dev);
+		else if (tmp)
+			DRM_ERROR("Unexpected DE Misc interrupt\n");
+		else
+			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
+
+		if (tmp) {
+			I915_WRITE(GEN8_DE_MISC_IIR, tmp);
+			ret = IRQ_HANDLED;
+		}
+	}
+
+	if (master_ctl & GEN8_DE_PORT_IRQ) {
+		tmp = I915_READ(GEN8_DE_PORT_IIR);
+		if (tmp & GEN8_AUX_CHANNEL_A)
+			dp_aux_irq_handler(dev);
+		else if (tmp)
+			DRM_ERROR("Unexpected DE Port interrupt\n");
+		else
+			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
+
+		if (tmp) {
+			I915_WRITE(GEN8_DE_PORT_IIR, tmp);
+			ret = IRQ_HANDLED;
+		}
+	}
+
+	for_each_pipe(pipe) {
+		uint32_t pipe_iir;
+
+		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
+			continue;
+
+		pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
+		if (pipe_iir & GEN8_PIPE_VBLANK)
+			drm_handle_vblank(dev, pipe);
+
+		if (pipe_iir & GEN8_PIPE_FLIP_DONE) {
+			intel_prepare_page_flip(dev, pipe);
+			intel_finish_page_flip_plane(dev, pipe);
+		}
+
+		if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
+			hsw_pipe_crc_irq_handler(dev, pipe);
+
+		if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) {
+			if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
+								  false))
+				DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
+						 pipe_name(pipe));
+		}
+
+		if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) {
+			DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
+				  pipe_name(pipe),
+				  pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
+		}
+
+		if (pipe_iir) {
+			ret = IRQ_HANDLED;
+			I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
+		} else
+			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
+	}
+
+	if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) {
+		/*
+		 * FIXME(BDW): Assume for now that the new interrupt handling
+		 * scheme also closed the SDE interrupt handling race we've seen
+		 * on older pch-split platforms. But this needs testing.
+		 */
+		u32 pch_iir = I915_READ(SDEIIR);
+
+		cpt_irq_handler(dev, pch_iir);
+
+		if (pch_iir) {
+			I915_WRITE(SDEIIR, pch_iir);
+			ret = IRQ_HANDLED;
+		}
+	}
+
+	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
+	POSTING_READ(GEN8_MASTER_IRQ);
+
+	return ret;
+}
+
 static void i915_error_wake_up(struct drm_i915_private *dev_priv,
 			       bool reset_completed)
 {
@@ -1516,7 +1949,7 @@ static void i915_error_work_func(struct work_struct *work)
 	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
 	int ret;
 
-	kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
+	kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
 
 	/*
 	 * Note that there's only one work item which does gpu resets, so we
@@ -1530,7 +1963,7 @@ static void i915_error_work_func(struct work_struct *work)
 	 */
 	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
 		DRM_DEBUG_DRIVER("resetting chip\n");
-		kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
+		kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
 				   reset_event);
 
 		/*
@@ -1557,7 +1990,7 @@ static void i915_error_work_func(struct work_struct *work)
 			smp_mb__before_atomic_inc();
 			atomic_inc(&dev_priv->gpu_error.reset_counter);
 
-			kobject_uevent_env(&dev->primary->kdev.kobj,
+			kobject_uevent_env(&dev->primary->kdev->kobj,
 					   KOBJ_CHANGE, reset_done_event);
 		} else {
 			atomic_set(&error->reset_counter, I915_WEDGED);
@@ -1787,7 +2220,7 @@ static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	unsigned long irqflags;
 	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
-						     DE_PIPE_VBLANK_ILK(pipe);
+						     DE_PIPE_VBLANK(pipe);
 
 	if (!i915_pipe_enabled(dev, pipe))
 		return -EINVAL;
@@ -1810,7 +2243,7 @@ static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
 
 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 	imr = I915_READ(VLV_IMR);
-	if (pipe == 0)
+	if (pipe == PIPE_A)
 		imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
 	else
 		imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
@@ -1822,6 +2255,22 @@ static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
 	return 0;
 }
 
+static int gen8_enable_vblank(struct drm_device *dev, int pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long irqflags;
+
+	if (!i915_pipe_enabled(dev, pipe))
+		return -EINVAL;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
+	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
+	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+	return 0;
+}
+
 /* Called from drm generic code, passed 'crtc' which
  * we use as a pipe index
  */
@@ -1845,7 +2294,7 @@ static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	unsigned long irqflags;
 	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
-						     DE_PIPE_VBLANK_ILK(pipe);
+						     DE_PIPE_VBLANK(pipe);
 
 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 	ironlake_disable_display_irq(dev_priv, bit);
@@ -1862,7 +2311,7 @@ static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
 	i915_disable_pipestat(dev_priv, pipe,
 			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
 	imr = I915_READ(VLV_IMR);
-	if (pipe == 0)
+	if (pipe == PIPE_A)
 		imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
 	else
 		imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
@@ -1870,6 +2319,21 @@ static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
 
+static void gen8_disable_vblank(struct drm_device *dev, int pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long irqflags;
+
+	if (!i915_pipe_enabled(dev, pipe))
+		return;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
+	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
+	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+}
+
 static u32
 ring_last_seqno(struct intel_ring_buffer *ring)
 {
@@ -1965,6 +2429,7 @@ ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
 	if (tmp & RING_WAIT) {
 		DRM_ERROR("Kicking stuck wait on %s\n",
 			  ring->name);
+		i915_handle_error(dev, false);
 		I915_WRITE_CTL(ring, tmp);
 		return HANGCHECK_KICK;
 	}
@@ -1976,6 +2441,7 @@ ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
 		case 1:
 			DRM_ERROR("Kicking stuck semaphore on %s\n",
 				  ring->name);
+			i915_handle_error(dev, false);
 			I915_WRITE_CTL(ring, tmp);
 			return HANGCHECK_KICK;
 		case 0:
@@ -2021,12 +2487,21 @@ static void i915_hangcheck_elapsed(unsigned long data)
 
 		if (ring->hangcheck.seqno == seqno) {
 			if (ring_idle(ring, seqno)) {
+				ring->hangcheck.action = HANGCHECK_IDLE;
+
 				if (waitqueue_active(&ring->irq_queue)) {
 					/* Issue a wake-up to catch stuck h/w. */
-					DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
-						  ring->name);
-					wake_up_all(&ring->irq_queue);
-					ring->hangcheck.score += HUNG;
+					if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
+						if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
+							DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
+								  ring->name);
+						else
+							DRM_INFO("Fake missed irq on %s\n",
+								 ring->name);
+						wake_up_all(&ring->irq_queue);
+					}
+					/* Safeguard against driver failure */
+					ring->hangcheck.score += BUSY;
 				} else
 					busy = false;
 			} else {
@@ -2049,6 +2524,7 @@ static void i915_hangcheck_elapsed(unsigned long data)
 							    acthd);
 
 				switch (ring->hangcheck.action) {
+				case HANGCHECK_IDLE:
 				case HANGCHECK_WAIT:
 					break;
 				case HANGCHECK_ACTIVE:
@@ -2064,6 +2540,8 @@ static void i915_hangcheck_elapsed(unsigned long data)
 				}
 			}
 		} else {
+			ring->hangcheck.action = HANGCHECK_ACTIVE;
+
 			/* Gradually reduce the count so that we catch DoS
 			 * attempts across multiple batches.
 			 */
@@ -2190,6 +2668,53 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
 	POSTING_READ(VLV_IER);
 }
 
+static void gen8_irq_preinstall(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int pipe;
+
+	atomic_set(&dev_priv->irq_received, 0);
+
+	I915_WRITE(GEN8_MASTER_IRQ, 0);
+	POSTING_READ(GEN8_MASTER_IRQ);
+
+	/* IIR can theoretically queue up two events. Be paranoid */
+#define GEN8_IRQ_INIT_NDX(type, which) do { \
+		I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
+		POSTING_READ(GEN8_##type##_IMR(which)); \
+		I915_WRITE(GEN8_##type##_IER(which), 0); \
+		I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
+		POSTING_READ(GEN8_##type##_IIR(which)); \
+		I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
+	} while (0)
+
+#define GEN8_IRQ_INIT(type) do { \
+		I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
+		POSTING_READ(GEN8_##type##_IMR); \
+		I915_WRITE(GEN8_##type##_IER, 0); \
+		I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
+		POSTING_READ(GEN8_##type##_IIR); \
+		I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
+	} while (0)
+
+	GEN8_IRQ_INIT_NDX(GT, 0);
+	GEN8_IRQ_INIT_NDX(GT, 1);
+	GEN8_IRQ_INIT_NDX(GT, 2);
+	GEN8_IRQ_INIT_NDX(GT, 3);
+
+	for_each_pipe(pipe) {
+		GEN8_IRQ_INIT_NDX(DE_PIPE, pipe);
+	}
+
+	GEN8_IRQ_INIT(DE_PORT);
+	GEN8_IRQ_INIT(DE_MISC);
+	GEN8_IRQ_INIT(PCU);
+#undef GEN8_IRQ_INIT
+#undef GEN8_IRQ_INIT_NDX
+
+	POSTING_READ(GEN8_PCU_IIR);
+}
+
 static void ibx_hpd_irq_setup(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
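The preinstall macros above write each IIR twice because, as the comment says, IIR can latch a second event behind the one currently asserted, and a single write-to-clear pops only the visible one. The discipline spelled out for a single register (a sketch of the reasoning, not additional driver code):

/* Sketch only: IIR holds one asserted event plus at most one queued
 * behind it, so clear, post the write, and clear again. */
I915_WRITE(GEN8_DE_MISC_IIR, 0xffffffff);	/* pops the visible event */
POSTING_READ(GEN8_DE_MISC_IIR);
I915_WRITE(GEN8_DE_MISC_IIR, 0xffffffff);	/* pops any queued second event */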
@@ -2254,10 +2779,10 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
 	pm_irqs = gt_irqs = 0;
 
 	dev_priv->gt_irq_mask = ~0;
-	if (HAS_L3_GPU_CACHE(dev)) {
+	if (HAS_L3_DPF(dev)) {
 		/* L3 parity interrupt is always unmasked. */
-		dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
-		gt_irqs |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
+		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
+		gt_irqs |= GT_PARITY_ERROR(dev);
 	}
 
 	gt_irqs |= GT_RENDER_USER_INTERRUPT;
@@ -2306,8 +2831,10 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
 	} else {
 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
 				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
-				DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN |
-				DE_PIPEA_FIFO_UNDERRUN | DE_POISON);
+				DE_AUX_CHANNEL_A |
+				DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
+				DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
+				DE_POISON);
 		extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT;
 	}
 
@@ -2341,7 +2868,8 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	u32 enable_mask;
-	u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
+	u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV |
+		PIPE_CRC_DONE_ENABLE;
 	unsigned long irqflags;
 
 	enable_mask = I915_DISPLAY_PORT_INTERRUPT;
@@ -2371,9 +2899,9 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
 	/* Interrupt setup is already guaranteed to be single-threaded, this is
 	 * just to make the assert_spin_locked check happy. */
 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-	i915_enable_pipestat(dev_priv, 0, pipestat_enable);
-	i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
-	i915_enable_pipestat(dev_priv, 1, pipestat_enable);
+	i915_enable_pipestat(dev_priv, PIPE_A, pipestat_enable);
+	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE);
+	i915_enable_pipestat(dev_priv, PIPE_B, pipestat_enable);
 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
 	I915_WRITE(VLV_IIR, 0xffffffff);
@@ -2392,6 +2920,117 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
2392 return 0; 2920 return 0;
2393} 2921}
2394 2922
2923static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
2924{
2925 int i;
2926
2927 /* These are interrupts we'll toggle with the ring mask register */
2928 uint32_t gt_interrupts[] = {
2929 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
2930 GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
2931 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
2932 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
2933 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
2934 0,
2935 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT
2936 };
2937
2938 for (i = 0; i < ARRAY_SIZE(gt_interrupts); i++) {
2939 u32 tmp = I915_READ(GEN8_GT_IIR(i));
2940 if (tmp)
2941 DRM_ERROR("Interrupt (%d) should have been masked in pre-install 0x%08x\n",
2942 i, tmp);
2943 I915_WRITE(GEN8_GT_IMR(i), ~gt_interrupts[i]);
2944 I915_WRITE(GEN8_GT_IER(i), gt_interrupts[i]);
2945 }
2946 POSTING_READ(GEN8_GT_IER(0));
2947}
2948
2949static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
2950{
2951 struct drm_device *dev = dev_priv->dev;
2952 uint32_t de_pipe_masked = GEN8_PIPE_FLIP_DONE |
2953 GEN8_PIPE_CDCLK_CRC_DONE |
2954 GEN8_PIPE_FIFO_UNDERRUN |
2955 GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2956 uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK;
2957 int pipe;
2958 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
2959 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
2960 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
2961
2962 for_each_pipe(pipe) {
2963 u32 tmp = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2964 if (tmp)
2965 DRM_ERROR("Interrupt (%d) should have been masked in pre-install 0x%08x\n",
2966 pipe, tmp);
2967 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2968 I915_WRITE(GEN8_DE_PIPE_IER(pipe), de_pipe_enables);
2969 }
2970 POSTING_READ(GEN8_DE_PIPE_ISR(0));
2971
2972 I915_WRITE(GEN8_DE_PORT_IMR, ~GEN8_AUX_CHANNEL_A);
2973 I915_WRITE(GEN8_DE_PORT_IER, GEN8_AUX_CHANNEL_A);
2974 POSTING_READ(GEN8_DE_PORT_IER);
2975}
2976
2977static int gen8_irq_postinstall(struct drm_device *dev)
2978{
2979 struct drm_i915_private *dev_priv = dev->dev_private;
2980
2981 gen8_gt_irq_postinstall(dev_priv);
2982 gen8_de_irq_postinstall(dev_priv);
2983
2984 ibx_irq_postinstall(dev);
2985
2986 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
2987 POSTING_READ(GEN8_MASTER_IRQ);
2988
2989 return 0;
2990}
2991
2992static void gen8_irq_uninstall(struct drm_device *dev)
2993{
2994 struct drm_i915_private *dev_priv = dev->dev_private;
2995 int pipe;
2996
2997 if (!dev_priv)
2998 return;
2999
3000 atomic_set(&dev_priv->irq_received, 0);
3001
3002 I915_WRITE(GEN8_MASTER_IRQ, 0);
3003
3004#define GEN8_IRQ_FINI_NDX(type, which) do { \
3005 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
3006 I915_WRITE(GEN8_##type##_IER(which), 0); \
3007 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
3008 } while (0)
3009
3010#define GEN8_IRQ_FINI(type) do { \
3011 I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
3012 I915_WRITE(GEN8_##type##_IER, 0); \
3013 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
3014 } while (0)
3015
3016 GEN8_IRQ_FINI_NDX(GT, 0);
3017 GEN8_IRQ_FINI_NDX(GT, 1);
3018 GEN8_IRQ_FINI_NDX(GT, 2);
3019 GEN8_IRQ_FINI_NDX(GT, 3);
3020
3021 for_each_pipe(pipe) {
3022 GEN8_IRQ_FINI_NDX(DE_PIPE, pipe);
3023 }
3024
3025 GEN8_IRQ_FINI(DE_PORT);
3026 GEN8_IRQ_FINI(DE_MISC);
3027 GEN8_IRQ_FINI(PCU);
3028#undef GEN8_IRQ_FINI
3029#undef GEN8_IRQ_FINI_NDX
3030
3031 POSTING_READ(GEN8_PCU_IIR);
3032}
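
The two FINI macros lean on ## token pasting: GEN8_IRQ_FINI_NDX(GT, 0) expands into writes to GEN8_GT_IMR(0), GEN8_GT_IER(0) and GEN8_GT_IIR(0), masking everything, disabling delivery, then acking whatever is still latched. A standalone sketch that substitutes printf for MMIO to make the expansion visible; the register-name strings are stand-ins, not driver API:

#include <stdio.h>

/* Print-only stand-ins: stringify the register instead of poking MMIO. */
#define I915_WRITE(reg, val) printf("%-12s <- 0x%08x\n", reg, (unsigned)(val))
#define GEN8_GT_IMR(w) "GT_IMR(" #w ")"
#define GEN8_GT_IER(w) "GT_IER(" #w ")"
#define GEN8_GT_IIR(w) "GT_IIR(" #w ")"

/* Same shape as the uninstall helper above. */
#define GEN8_IRQ_FINI_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
} while (0)

int main(void)
{
	GEN8_IRQ_FINI_NDX(GT, 0);  /* prints GT_IMR(0)/GT_IER(0)/GT_IIR(0) */
	return 0;
}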
3033
2395static void valleyview_irq_uninstall(struct drm_device *dev) 3034static void valleyview_irq_uninstall(struct drm_device *dev)
2396{ 3035{
2397 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3036 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -2464,6 +3103,7 @@ static void i8xx_irq_preinstall(struct drm_device * dev)
2464static int i8xx_irq_postinstall(struct drm_device *dev) 3103static int i8xx_irq_postinstall(struct drm_device *dev)
2465{ 3104{
2466 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3105 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3106 unsigned long irqflags;
2467 3107
2468 I915_WRITE16(EMR, 3108 I915_WRITE16(EMR,
2469 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 3109 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
@@ -2484,6 +3124,13 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
2484 I915_USER_INTERRUPT); 3124 I915_USER_INTERRUPT);
2485 POSTING_READ16(IER); 3125 POSTING_READ16(IER);
2486 3126
3127 /* Interrupt setup is already guaranteed to be single-threaded, this is
3128 * just to make the assert_spin_locked check happy. */
3129 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3130 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
3131 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
3132 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3133
2487 return 0; 3134 return 0;
2488} 3135}
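
The PIPESTAT registers pack enable bits in the high 16 bits and the matching status bits in the low 16, which is why the postinstall hooks set *_ENABLE bits while the interrupt handlers test *_STATUS bits. A sketch of that layout, assuming the conventional enable = status << 16 relation; the exact bit position is illustrative:

#include <stdint.h>
#include <stdio.h>

/* Illustrative layout: status bit n in the low half is enabled by
 * bit n+16 in the high half of the same PIPESTAT register. */
#define CRC_DONE_STATUS (1u << 4)
#define CRC_DONE_ENABLE (CRC_DONE_STATUS << 16)

int main(void)
{
	uint32_t pipestat = 0;

	pipestat |= CRC_DONE_ENABLE;  /* what i915_enable_pipestat sets */
	pipestat |= CRC_DONE_STATUS;  /* what the hardware latches */

	if ((pipestat >> 16) & (pipestat & 0xffff))
		printf("CRC done fired and was enabled\n");
	return 0;
}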
2489 3136
@@ -2570,13 +3217,14 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
2570 if (iir & I915_USER_INTERRUPT) 3217 if (iir & I915_USER_INTERRUPT)
2571 notify_ring(dev, &dev_priv->ring[RCS]); 3218 notify_ring(dev, &dev_priv->ring[RCS]);
2572 3219
2573 if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS && 3220 for_each_pipe(pipe) {
2574 i8xx_handle_vblank(dev, 0, iir)) 3221 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
2575 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0); 3222 i8xx_handle_vblank(dev, pipe, iir))
3223 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
2576 3224
2577 if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS && 3225 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
2578 i8xx_handle_vblank(dev, 1, iir)) 3226 i9xx_pipe_crc_irq_handler(dev, pipe);
2579 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1); 3227 }
2580 3228
2581 iir = new_iir; 3229 iir = new_iir;
2582 } 3230 }
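
The handler's iir/new_iir dance acks the current snapshot before servicing it and then re-reads IIR, so events that latch while one batch is being handled are picked up on the next loop pass instead of being lost. A userspace sketch of the same loop shape:

#include <stdint.h>
#include <stdio.h>

/* Toy IIR: writing 1s acks (clears) bits; new events may latch anytime. */
static uint32_t iir_reg = 0x3;

static uint32_t read_iir(void) { return iir_reg; }
static void ack_iir(uint32_t v) { iir_reg &= ~v; }

int main(void)
{
	uint32_t iir = read_iir();

	while (iir) {
		uint32_t new_iir;

		ack_iir(iir);          /* write back to clear */
		new_iir = read_iir();  /* events that raced in */
		printf("handling 0x%08x\n", iir);
		iir = new_iir;         /* the same iir = new_iir step */
	}
	return 0;
}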
@@ -2623,6 +3271,7 @@ static int i915_irq_postinstall(struct drm_device *dev)
2623{ 3271{
2624 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3272 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2625 u32 enable_mask; 3273 u32 enable_mask;
3274 unsigned long irqflags;
2626 3275
2627 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 3276 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
2628 3277
@@ -2658,6 +3307,13 @@ static int i915_irq_postinstall(struct drm_device *dev)
2658 3307
2659 i915_enable_asle_pipestat(dev); 3308 i915_enable_asle_pipestat(dev);
2660 3309
3310 /* Interrupt setup is already guaranteed to be single-threaded, this is
3311 * just to make the assert_spin_locked check happy. */
3312 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3313 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
3314 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
3315 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3316
2661 return 0; 3317 return 0;
2662} 3318}
2663 3319
@@ -2769,6 +3425,9 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
2769 3425
2770 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 3426 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
2771 blc_event = true; 3427 blc_event = true;
3428
3429 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3430 i9xx_pipe_crc_irq_handler(dev, pipe);
2772 } 3431 }
2773 3432
2774 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 3433 if (blc_event || (iir & I915_ASLE_INTERRUPT))
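
i9xx_pipe_crc_irq_handler(), introduced elsewhere in this patch, hands each pipe's CRC result to a consumer; upstream it feeds a ring buffer drained via debugfs using the kernel's CIRC_SPACE() helper from <linux/circ_buf.h>. A hedged producer-side sketch with that helper's semantics mirrored in userspace; the entry layout and names are invented for illustration:

#include <stdio.h>

/* Userspace stand-in mirroring the kernel's CIRC_SPACE(). */
#define CIRC_SPACE(head, tail, size) (((tail) - ((head) + 1)) & ((size) - 1))

#define BUF_LEN 128  /* must be a power of two; illustrative */

/* Hypothetical entry: one CRC result per frame. */
struct crc_entry {
	unsigned int frame;
	unsigned int crc;
};

static struct crc_entry entries[BUF_LEN];
static int head, tail;

/* Drop the sample when the consumer lags rather than block in IRQ context. */
static int queue_crc(unsigned int frame, unsigned int crc)
{
	if (!CIRC_SPACE(head, tail, BUF_LEN))
		return -1;
	entries[head].frame = frame;
	entries[head].crc = crc;
	head = (head + 1) & (BUF_LEN - 1);
	return 0;
}

int main(void)
{
	queue_crc(1, 0xdeadbeef);
	printf("queued=%d\n", head - tail);
	return 0;
}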
@@ -2867,7 +3526,9 @@ static int i965_irq_postinstall(struct drm_device *dev)
2867 /* Interrupt setup is already guaranteed to be single-threaded, this is 3526 /* Interrupt setup is already guaranteed to be single-threaded, this is
2868 * just to make the assert_spin_locked check happy. */ 3527 * just to make the assert_spin_locked check happy. */
2869 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3528 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2870 i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE); 3529 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE);
3530 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
3531 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
2871 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3532 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2872 3533
2873 /* 3534 /*
@@ -3013,6 +3674,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
3013 3674
3014 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 3675 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
3015 blc_event = true; 3676 blc_event = true;
3677
3678 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3679 i9xx_pipe_crc_irq_handler(dev, pipe);
3016 } 3680 }
3017 3681
3018 3682
@@ -3122,18 +3786,21 @@ void intel_irq_init(struct drm_device *dev)
3122 3786
3123 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); 3787 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
3124 3788
3125 dev->driver->get_vblank_counter = i915_get_vblank_counter; 3789 if (IS_GEN2(dev)) {
3126 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 3790 dev->max_vblank_count = 0;
3127 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { 3791 dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
3792 } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
3128 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ 3793 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
3129 dev->driver->get_vblank_counter = gm45_get_vblank_counter; 3794 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
3795 } else {
3796 dev->driver->get_vblank_counter = i915_get_vblank_counter;
3797 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
3130 } 3798 }
3131 3799
3132 if (drm_core_check_feature(dev, DRIVER_MODESET)) 3800 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
3133 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; 3801 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
3134 else 3802 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
3135 dev->driver->get_vblank_timestamp = NULL; 3803 }
3136 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
3137 3804
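
The rework above handles gen2 first because it has no usable hardware frame counter: the hook becomes a stub and max_vblank_count is set to 0, so the masked wraparound diff in the DRM core is always zero and the software count advances purely on vblank interrupts. G4X and gen5+ report a full 32-bit counter; everything in between only 24 bits. A sketch of the stub and of the wraparound arithmetic that max_vblank_count bounds; the stub body is assumed, not copied from the driver:

#include <stdint.h>
#include <stdio.h>

/* Gen2-style stub: no hardware counter, report a constant and let the
 * core count interrupts instead (max_vblank_count = 0). */
static uint32_t i8xx_style_get_vblank_counter(void)
{
	return 0;
}

int main(void)
{
	uint32_t max24 = 0xffffff;  /* 24-bit frame counter wraps here */
	uint32_t count = 0xfffffe;

	printf("gen2 counter: %u\n", i8xx_style_get_vblank_counter());
	/* the core masks counter diffs with max_vblank_count: */
	printf("after 3 frames: %u\n", (count + 3) & max24);
	return 0;
}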
3138 if (IS_VALLEYVIEW(dev)) { 3805 if (IS_VALLEYVIEW(dev)) {
3139 dev->driver->irq_handler = valleyview_irq_handler; 3806 dev->driver->irq_handler = valleyview_irq_handler;
@@ -3143,6 +3810,14 @@ void intel_irq_init(struct drm_device *dev)
3143 dev->driver->enable_vblank = valleyview_enable_vblank; 3810 dev->driver->enable_vblank = valleyview_enable_vblank;
3144 dev->driver->disable_vblank = valleyview_disable_vblank; 3811 dev->driver->disable_vblank = valleyview_disable_vblank;
3145 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 3812 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
3813 } else if (IS_GEN8(dev)) {
3814 dev->driver->irq_handler = gen8_irq_handler;
3815 dev->driver->irq_preinstall = gen8_irq_preinstall;
3816 dev->driver->irq_postinstall = gen8_irq_postinstall;
3817 dev->driver->irq_uninstall = gen8_irq_uninstall;
3818 dev->driver->enable_vblank = gen8_enable_vblank;
3819 dev->driver->disable_vblank = gen8_disable_vblank;
3820 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
3146 } else if (HAS_PCH_SPLIT(dev)) { 3821 } else if (HAS_PCH_SPLIT(dev)) {
3147 dev->driver->irq_handler = ironlake_irq_handler; 3822 dev->driver->irq_handler = ironlake_irq_handler;
3148 dev->driver->irq_preinstall = ironlake_irq_preinstall; 3823 dev->driver->irq_preinstall = ironlake_irq_preinstall;