author     Paulo Zanoni <paulo.r.zanoni@intel.com>     2013-08-06 17:57:12 -0400
committer  Daniel Vetter <daniel.vetter@ffwll.ch>      2013-08-23 08:52:26 -0400
commit     43eaea131823c5ca13d03364e61bd15f0b22a0f7
tree       48bd1ea6a98a7bf294abe73e10b1e14fa85ebfe5
parent     a40066412cc2ace1c1299e7a4d7a81dc33395b6f
drm/i915: wrap GTIMR changes
Just like the functions that touch DEIMR and SDEIMR, but for GTIMR. The
new functions contain a POSTING_READ(GTIMR), which was not present in
the two callers inside i915_irq.c.

The implementation is based on ibx_display_interrupt_update.

Signed-off-by: Paulo Zanoni <paulo.r.zanoni@intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@gmail.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c          | 34
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h         |  3
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c  | 22
3 files changed, 39 insertions(+), 20 deletions(-)
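For reference, the update arithmetic in ilk_update_gt_irq() follows the usual IMR convention: a bit set in GTIMR masks (disables) that interrupt, so "enable" clears bits and "disable" sets them. Below is a minimal standalone sketch of just that arithmetic; all names are hypothetical stand-ins and there is no real register access, unlike the driver code in the diff that follows.

/*
 * Standalone sketch of the GTIMR update arithmetic from ilk_update_gt_irq().
 * Hypothetical names only; the real code writes the value to the GTIMR
 * register under dev_priv->irq_lock and flushes it with POSTING_READ(GTIMR).
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t gt_irq_mask;	/* stand-in for dev_priv->gt_irq_mask */

static void update_gt_irq(uint32_t interrupt_mask, uint32_t enabled_irq_mask)
{
	/* Clear every bit we are touching... */
	gt_irq_mask &= ~interrupt_mask;
	/* ...then re-set only the touched bits that should stay masked. */
	gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	/* In the driver: I915_WRITE(GTIMR, gt_irq_mask); POSTING_READ(GTIMR); */
}

int main(void)
{
	const uint32_t PARITY_BIT = 1u << 5;	/* hypothetical interrupt bit */

	gt_irq_mask = ~0u;			/* start with everything masked */

	update_gt_irq(PARITY_BIT, PARITY_BIT);	/* "enable": bit cleared in IMR */
	assert(!(gt_irq_mask & PARITY_BIT));

	update_gt_irq(PARITY_BIT, 0);		/* "disable": bit set again */
	assert(gt_irq_mask & PARITY_BIT);

	printf("final IMR image: 0x%08x\n", (unsigned)gt_irq_mask);
	return 0;
}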
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 28d57477aa42..6bd4508666d2 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -104,6 +104,34 @@ ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
 	}
 }
 
+/**
+ * ilk_update_gt_irq - update GTIMR
+ * @dev_priv: driver private
+ * @interrupt_mask: mask of interrupt bits to update
+ * @enabled_irq_mask: mask of interrupt bits to enable
+ */
+static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
+			      uint32_t interrupt_mask,
+			      uint32_t enabled_irq_mask)
+{
+	assert_spin_locked(&dev_priv->irq_lock);
+
+	dev_priv->gt_irq_mask &= ~interrupt_mask;
+	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
+	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+	POSTING_READ(GTIMR);
+}
+
+void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+{
+	ilk_update_gt_irq(dev_priv, mask, mask);
+}
+
+void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+{
+	ilk_update_gt_irq(dev_priv, mask, 0);
+}
+
 static bool ivb_can_enable_err_int(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -806,8 +834,7 @@ static void ivybridge_parity_work(struct work_struct *work)
 	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	dev_priv->gt_irq_mask &= ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
-	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+	ilk_enable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
 	mutex_unlock(&dev_priv->dev->struct_mutex);
@@ -837,8 +864,7 @@ static void ivybridge_parity_error_irq_handler(struct drm_device *dev)
 		return;
 
 	spin_lock(&dev_priv->irq_lock);
-	dev_priv->gt_irq_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
-	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+	ilk_disable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
 	spin_unlock(&dev_priv->irq_lock);
 
 	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 01455aa8b8bb..a8462064714c 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -778,5 +778,8 @@ extern void intel_edp_psr_update(struct drm_device *dev);
 extern void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
 			      bool switch_to_fclk, bool allow_power_down);
 extern void hsw_restore_lcpll(struct drm_i915_private *dev_priv);
+extern void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+extern void ilk_disable_gt_irq(struct drm_i915_private *dev_priv,
+			       uint32_t mask);
 
 #endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 34777168f700..2e370804248f 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -836,11 +836,8 @@ gen5_ring_get_irq(struct intel_ring_buffer *ring)
 		return false;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (ring->irq_refcount++ == 0) {
-		dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
-		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-		POSTING_READ(GTIMR);
-	}
+	if (ring->irq_refcount++ == 0)
+		ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
 	return true;
@@ -854,11 +851,8 @@ gen5_ring_put_irq(struct intel_ring_buffer *ring)
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--ring->irq_refcount == 0) {
-		dev_priv->gt_irq_mask |= ring->irq_enable_mask;
-		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-		POSTING_READ(GTIMR);
-	}
+	if (--ring->irq_refcount == 0)
+		ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
 
@@ -1028,9 +1022,7 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
 				       GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
 		else
 			I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
-		dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
-		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-		POSTING_READ(GTIMR);
+		ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
 	}
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
@@ -1051,9 +1043,7 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
 				~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
 		else
 			I915_WRITE_IMR(ring, ~0);
-		dev_priv->gt_irq_mask |= ring->irq_enable_mask;
-		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-		POSTING_READ(GTIMR);
+		ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
 	}
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 