about | summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
authorPaulo Zanoni <paulo.r.zanoni@intel.com>2013-08-15 10:51:32 -0400
committerDaniel Vetter <daniel.vetter@ffwll.ch>2013-08-23 08:52:30 -0400
commit1403c0d4d46f2eed2ab13b89561c853988ad7513 (patch)
tree9a40a0cbbd1ab6cc55e9e4486219a6f3537f5761
parent4d3b3d5fd7d42a522a6c444388826bb23264db9f (diff)
drm/i915: merge HSW and SNB PM irq handlers
Because hsw_pm_irq_handler does exactly what gen6_rps_irq_handler does and also processes the 2 additional VEBOX bits. So merge those functions and wrap the VEBOX bits on a HAS_VEBOX check. This check isn't really necessary since the bits are reserved on SNB/IVB/VLV, but it's a good documentation on who uses them.

v2: - Change IS_HASWELL check to HAS_VEBOX

Signed-off-by: Paulo Zanoni <paulo.r.zanoni@intel.com>
Reviewed-by: Ben Widawsky <ben@bwidawsk.net>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 50
1 file changed, 12 insertions(+), 38 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index e0c6f7d6189d..caf83da17bb0 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -942,28 +942,6 @@ static void snb_gt_irq_handler(struct drm_device *dev,
 		ivybridge_parity_error_irq_handler(dev);
 }
 
-/* Legacy way of handling PM interrupts */
-static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv,
-				 u32 pm_iir)
-{
-	/*
-	 * IIR bits should never already be set because IMR should
-	 * prevent an interrupt from being shown in IIR. The warning
-	 * displays a case where we've unsafely cleared
-	 * dev_priv->rps.pm_iir. Although missing an interrupt of the same
-	 * type is not a problem, it displays a problem in the logic.
-	 *
-	 * The mask bit in IMR is cleared by dev_priv->rps.work.
-	 */
-
-	spin_lock(&dev_priv->irq_lock);
-	dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
-	snb_disable_pm_irq(dev_priv, pm_iir & GEN6_PM_RPS_EVENTS);
-	spin_unlock(&dev_priv->irq_lock);
-
-	queue_work(dev_priv->wq, &dev_priv->rps.work);
-}
-
 #define HPD_STORM_DETECT_PERIOD 1000
 #define HPD_STORM_THRESHOLD 5
 
@@ -1030,13 +1008,10 @@ static void dp_aux_irq_handler(struct drm_device *dev)
 	wake_up_all(&dev_priv->gmbus_wait_queue);
 }
 
-/* Unlike gen6_rps_irq_handler() from which this function is originally derived,
- * we must be able to deal with other PM interrupts. This is complicated because
- * of the way in which we use the masks to defer the RPS work (which for
- * posterity is necessary because of forcewake).
- */
-static void hsw_pm_irq_handler(struct drm_i915_private *dev_priv,
-			       u32 pm_iir)
+/* The RPS events need forcewake, so we add them to a work queue and mask their
+ * IMR bits until the work is done. Other interrupts can be processed without
+ * the work queue. */
+static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
 {
 	if (pm_iir & GEN6_PM_RPS_EVENTS) {
 		spin_lock(&dev_priv->irq_lock);
@@ -1047,12 +1022,14 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
 		queue_work(dev_priv->wq, &dev_priv->rps.work);
 	}
 
-	if (pm_iir & PM_VEBOX_USER_INTERRUPT)
-		notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
+	if (HAS_VEBOX(dev_priv->dev)) {
+		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
+			notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
 
-	if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
-		DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
-		i915_handle_error(dev_priv->dev, false);
+		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
+			DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
+			i915_handle_error(dev_priv->dev, false);
+		}
 	}
 }
 
@@ -1427,10 +1404,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
 	if (INTEL_INFO(dev)->gen >= 6) {
 		u32 pm_iir = I915_READ(GEN6_PMIIR);
 		if (pm_iir) {
-			if (IS_HASWELL(dev))
-				hsw_pm_irq_handler(dev_priv, pm_iir);
-			else
-				gen6_rps_irq_handler(dev_priv, pm_iir);
+			gen6_rps_irq_handler(dev_priv, pm_iir);
 			I915_WRITE(GEN6_PMIIR, pm_iir);
 			ret = IRQ_HANDLED;
 		}