Diffstat (limited to 'drivers/gpu/drm/i915/i915_irq.c')
-rw-r--r--	drivers/gpu/drm/i915/i915_irq.c | 269
1 file changed, 103 insertions(+), 166 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 0dadc025b77..e418e8bb61e 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -64,26 +64,6 @@
 #define DRM_I915_VBLANK_PIPE_ALL	(DRM_I915_VBLANK_PIPE_A | \
 					 DRM_I915_VBLANK_PIPE_B)
 
-void
-ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
-{
-	if ((dev_priv->gt_irq_mask & mask) != 0) {
-		dev_priv->gt_irq_mask &= ~mask;
-		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-		POSTING_READ(GTIMR);
-	}
-}
-
-void
-ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
-{
-	if ((dev_priv->gt_irq_mask & mask) != mask) {
-		dev_priv->gt_irq_mask |= mask;
-		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-		POSTING_READ(GTIMR);
-	}
-}
-
 /* For display hotplug interrupt */
 static void
 ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
@@ -105,26 +85,6 @@ ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
 	}
 }
 
-void
-i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
-{
-	if ((dev_priv->irq_mask & mask) != 0) {
-		dev_priv->irq_mask &= ~mask;
-		I915_WRITE(IMR, dev_priv->irq_mask);
-		POSTING_READ(IMR);
-	}
-}
-
-void
-i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
-{
-	if ((dev_priv->irq_mask & mask) != mask) {
-		dev_priv->irq_mask |= mask;
-		I915_WRITE(IMR, dev_priv->irq_mask);
-		POSTING_READ(IMR);
-	}
-}
-
 static inline u32
 i915_pipestat(int pipe)
 {
@@ -389,9 +349,12 @@ static void notify_ring(struct drm_device *dev,
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 seqno = ring->get_seqno(ring);
-	ring->irq_seqno = seqno;
+
 	trace_i915_gem_request_complete(dev, seqno);
+
+	ring->irq_seqno = seqno;
 	wake_up_all(&ring->irq_queue);
+
 	dev_priv->hangcheck_count = 0;
 	mod_timer(&dev_priv->hangcheck_timer,
 		  jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
@@ -435,6 +398,50 @@ static void gen6_pm_irq_handler(struct drm_device *dev)
 	I915_WRITE(GEN6_PMIIR, pm_iir);
 }
 
+static void pch_irq_handler(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	u32 pch_iir;
+
+	pch_iir = I915_READ(SDEIIR);
+
+	if (pch_iir & SDE_AUDIO_POWER_MASK)
+		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
+				 (pch_iir & SDE_AUDIO_POWER_MASK) >>
+				 SDE_AUDIO_POWER_SHIFT);
+
+	if (pch_iir & SDE_GMBUS)
+		DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");
+
+	if (pch_iir & SDE_AUDIO_HDCP_MASK)
+		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
+
+	if (pch_iir & SDE_AUDIO_TRANS_MASK)
+		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
+
+	if (pch_iir & SDE_POISON)
+		DRM_ERROR("PCH poison interrupt\n");
+
+	if (pch_iir & SDE_FDI_MASK) {
+		u32 fdia, fdib;
+
+		fdia = I915_READ(FDI_RXA_IIR);
+		fdib = I915_READ(FDI_RXB_IIR);
+		DRM_DEBUG_DRIVER("PCH FDI RX interrupt; FDI RXA IIR: 0x%08x, FDI RXB IIR: 0x%08x\n", fdia, fdib);
+	}
+
+	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
+		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
+
+	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
+		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
+
+	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
+		DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n");
+	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
+		DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
+}
+
 static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -502,8 +509,11 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
 		drm_handle_vblank(dev, 1);
 
 	/* check event from PCH */
-	if ((de_iir & DE_PCH_EVENT) && (pch_iir & hotplug_mask))
-		queue_work(dev_priv->wq, &dev_priv->hotplug_work);
+	if (de_iir & DE_PCH_EVENT) {
+		if (pch_iir & hotplug_mask)
+			queue_work(dev_priv->wq, &dev_priv->hotplug_work);
+		pch_irq_handler(dev);
+	}
 
 	if (de_iir & DE_PCU_EVENT) {
 		I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
@@ -556,10 +566,9 @@ static void i915_error_work_func(struct work_struct *work)
 
 #ifdef CONFIG_DEBUG_FS
 static struct drm_i915_error_object *
-i915_error_object_create(struct drm_device *dev,
+i915_error_object_create(struct drm_i915_private *dev_priv,
 			 struct drm_i915_gem_object *src)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_error_object *dst;
 	int page, page_count;
 	u32 reloc_offset;
@@ -632,52 +641,6 @@ i915_error_state_free(struct drm_device *dev,
 	kfree(error);
 }
 
-static u32
-i915_get_bbaddr(struct drm_device *dev, u32 *ring)
-{
-	u32 cmd;
-
-	if (IS_I830(dev) || IS_845G(dev))
-		cmd = MI_BATCH_BUFFER;
-	else if (INTEL_INFO(dev)->gen >= 4)
-		cmd = (MI_BATCH_BUFFER_START | (2 << 6) |
-		       MI_BATCH_NON_SECURE_I965);
-	else
-		cmd = (MI_BATCH_BUFFER_START | (2 << 6));
-
-	return ring[0] == cmd ? ring[1] : 0;
-}
-
-static u32
-i915_ringbuffer_last_batch(struct drm_device *dev,
-			   struct intel_ring_buffer *ring)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 head, bbaddr;
-	u32 *val;
-
-	/* Locate the current position in the ringbuffer and walk back
-	 * to find the most recently dispatched batch buffer.
-	 */
-	head = I915_READ_HEAD(ring) & HEAD_ADDR;
-
-	val = (u32 *)(ring->virtual_start + head);
-	while (--val >= (u32 *)ring->virtual_start) {
-		bbaddr = i915_get_bbaddr(dev, val);
-		if (bbaddr)
-			return bbaddr;
-	}
-
-	val = (u32 *)(ring->virtual_start + ring->size);
-	while (--val >= (u32 *)ring->virtual_start) {
-		bbaddr = i915_get_bbaddr(dev, val);
-		if (bbaddr)
-			return bbaddr;
-	}
-
-	return 0;
-}
-
 static u32 capture_bo_list(struct drm_i915_error_buffer *err,
 			   int count,
 			   struct list_head *head)
@@ -702,6 +665,7 @@ static u32 capture_bo_list(struct drm_i915_error_buffer *err,
 		err->dirty = obj->dirty;
 		err->purgeable = obj->madv != I915_MADV_WILLNEED;
 		err->ring = obj->ring ? obj->ring->id : 0;
+		err->agp_type = obj->agp_type == AGP_USER_CACHED_MEMORY;
 
 		if (++i == count)
 			break;
@@ -741,6 +705,36 @@ static void i915_gem_record_fences(struct drm_device *dev,
 	}
 }
 
+static struct drm_i915_error_object *
+i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
+			     struct intel_ring_buffer *ring)
+{
+	struct drm_i915_gem_object *obj;
+	u32 seqno;
+
+	if (!ring->get_seqno)
+		return NULL;
+
+	seqno = ring->get_seqno(ring);
+	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
+		if (obj->ring != ring)
+			continue;
+
+		if (!i915_seqno_passed(obj->last_rendering_seqno, seqno))
+			continue;
+
+		if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
+			continue;
+
+		/* We need to copy these to an anonymous buffer as the simplest
+		 * method to avoid being overwritten by userspace.
+		 */
+		return i915_error_object_create(dev_priv, obj);
+	}
+
+	return NULL;
+}
+
 /**
  * i915_capture_error_state - capture an error record for later analysis
  * @dev: drm device
@@ -755,10 +749,8 @@ static void i915_capture_error_state(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
 	struct drm_i915_error_state *error;
-	struct drm_i915_gem_object *batchbuffer[2];
 	unsigned long flags;
-	u32 bbaddr;
-	int count;
+	int i;
 
 	spin_lock_irqsave(&dev_priv->error_lock, flags);
 	error = dev_priv->first_error;
@@ -817,83 +809,30 @@ static void i915_capture_error_state(struct drm_device *dev)
 	}
 	i915_gem_record_fences(dev, error);
 
-	bbaddr = i915_ringbuffer_last_batch(dev, &dev_priv->ring[RCS]);
-
-	/* Grab the current batchbuffer, most likely to have crashed. */
-	batchbuffer[0] = NULL;
-	batchbuffer[1] = NULL;
-	count = 0;
-	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
-		if (batchbuffer[0] == NULL &&
-		    bbaddr >= obj->gtt_offset &&
-		    bbaddr < obj->gtt_offset + obj->base.size)
-			batchbuffer[0] = obj;
-
-		if (batchbuffer[1] == NULL &&
-		    error->acthd >= obj->gtt_offset &&
-		    error->acthd < obj->gtt_offset + obj->base.size)
-			batchbuffer[1] = obj;
-
-		count++;
-	}
-	/* Scan the other lists for completeness for those bizarre errors. */
-	if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
-		list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) {
-			if (batchbuffer[0] == NULL &&
-			    bbaddr >= obj->gtt_offset &&
-			    bbaddr < obj->gtt_offset + obj->base.size)
-				batchbuffer[0] = obj;
-
-			if (batchbuffer[1] == NULL &&
-			    error->acthd >= obj->gtt_offset &&
-			    error->acthd < obj->gtt_offset + obj->base.size)
-				batchbuffer[1] = obj;
-
-			if (batchbuffer[0] && batchbuffer[1])
-				break;
-		}
-	}
-	if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
-		list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
-			if (batchbuffer[0] == NULL &&
-			    bbaddr >= obj->gtt_offset &&
-			    bbaddr < obj->gtt_offset + obj->base.size)
-				batchbuffer[0] = obj;
-
-			if (batchbuffer[1] == NULL &&
-			    error->acthd >= obj->gtt_offset &&
-			    error->acthd < obj->gtt_offset + obj->base.size)
-				batchbuffer[1] = obj;
-
-			if (batchbuffer[0] && batchbuffer[1])
-				break;
-		}
-	}
-
-	/* We need to copy these to an anonymous buffer as the simplest
-	 * method to avoid being overwritten by userspace.
-	 */
-	error->batchbuffer[0] = i915_error_object_create(dev, batchbuffer[0]);
-	if (batchbuffer[1] != batchbuffer[0])
-		error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]);
-	else
-		error->batchbuffer[1] = NULL;
+	/* Record the active batchbuffers */
+	for (i = 0; i < I915_NUM_RINGS; i++)
+		error->batchbuffer[i] =
+			i915_error_first_batchbuffer(dev_priv,
+						     &dev_priv->ring[i]);
 
 	/* Record the ringbuffer */
-	error->ringbuffer = i915_error_object_create(dev,
+	error->ringbuffer = i915_error_object_create(dev_priv,
 						     dev_priv->ring[RCS].obj);
 
 	/* Record buffers on the active and pinned lists. */
 	error->active_bo = NULL;
 	error->pinned_bo = NULL;
 
-	error->active_bo_count = count;
+	i = 0;
+	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
+		i++;
+	error->active_bo_count = i;
 	list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
-		count++;
-	error->pinned_bo_count = count - error->active_bo_count;
+		i++;
+	error->pinned_bo_count = i - error->active_bo_count;
 
-	if (count) {
-		error->active_bo = kmalloc(sizeof(*error->active_bo)*count,
+	if (i) {
+		error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
 					   GFP_ATOMIC);
 		if (error->active_bo)
 			error->pinned_bo =
@@ -1673,11 +1612,6 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
 
 	I915_WRITE(GTIIR, I915_READ(GTIIR));
 	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-	if (IS_GEN6(dev)) {
-		I915_WRITE(GEN6_RENDER_IMR, ~GEN6_RENDER_USER_INTERRUPT);
-		I915_WRITE(GEN6_BSD_IMR, ~GEN6_BSD_USER_INTERRUPT);
-		I915_WRITE(GEN6_BLITTER_IMR, ~GEN6_BLITTER_USER_INTERRUPT);
-	}
 
 	if (IS_GEN6(dev))
 		render_irqs =
@@ -1698,6 +1632,9 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
 	} else {
 		hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
 			       SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
+		hotplug_mask |= SDE_AUX_MASK | SDE_FDI_MASK | SDE_TRANS_MASK;
+		I915_WRITE(FDI_RXA_IMR, 0);
+		I915_WRITE(FDI_RXB_IMR, 0);
 	}
 
 	dev_priv->pch_irq_mask = ~hotplug_mask;
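Note: all four helpers deleted in the first two hunks (ironlake_{enable,disable}_graphics_irq and i915_{enable,disable}_irq) share one idiom: keep a cached shadow of an interrupt-mask register and only perform the MMIO write plus posting read when the shadow actually changes. Below is a minimal, self-contained sketch of that idiom for reference; struct fake_dev, write_imr and main are stand-ins invented for illustration, not i915 API — only the masking logic mirrors the deleted helpers.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for drm_i915_private_t plus the hardware register;
 * in the driver the write is I915_WRITE(IMR, ...) followed by
 * POSTING_READ(IMR) to flush the posted write. */
struct fake_dev {
	uint32_t irq_mask;	/* shadow: a set bit means the irq is masked off */
	uint32_t imr;		/* simulated Interrupt Mask Register */
};

static void write_imr(struct fake_dev *dev, uint32_t val)
{
	dev->imr = val;		/* real driver: I915_WRITE + POSTING_READ */
}

/* Unmask (enable) the given bits, skipping redundant hardware writes. */
static void irq_enable(struct fake_dev *dev, uint32_t mask)
{
	if ((dev->irq_mask & mask) != 0) {
		dev->irq_mask &= ~mask;
		write_imr(dev, dev->irq_mask);
	}
}

/* Mask (disable) the given bits, again only writing on a change. */
static void irq_disable(struct fake_dev *dev, uint32_t mask)
{
	if ((dev->irq_mask & mask) != mask) {
		dev->irq_mask |= mask;
		write_imr(dev, dev->irq_mask);
	}
}

int main(void)
{
	struct fake_dev dev = { .irq_mask = ~0u, .imr = ~0u };

	irq_enable(&dev, 1u << 4);	/* clears bit 4 and writes IMR */
	irq_enable(&dev, 1u << 4);	/* already enabled: no MMIO write */
	printf("IMR = 0x%08x\n", (unsigned)dev.imr);	/* 0xffffffef */
	irq_disable(&dev, 1u << 4);	/* sets bit 4 again */
	printf("IMR = 0x%08x\n", (unsigned)dev.imr);	/* 0xffffffff */
	return 0;
}

The same shadow-mask pattern survives in the patch itself: ironlake_irq_postinstall still writes dev_priv->gt_irq_mask to GTIMR, and the new pch_irq_handler reads SDEIIR once and decodes it bit by bit.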