path: root/drivers/gpu/drm/i915/i915_irq.c
author	Daniel Vetter <daniel.vetter@ffwll.ch>	2013-07-04 17:35:25 -0400
committer	Daniel Vetter <daniel.vetter@ffwll.ch>	2013-07-11 08:36:13 -0400
commit	d0ecd7e221c87514b1eca84b11fee1e262f5d816 (patch)
tree	c74dec09d6f068020c90d03b63664bbaba5ce37f /drivers/gpu/drm/i915/i915_irq.c
parent	de28075d5bb3e1e9f92d19da214b6a96f544b66d (diff)
drm/i915: irq handlers don't need interrupt-safe spinlocks
They don't, since we only have one interrupt handler and interrupt handlers are non-reentrant.

To drive the point really home, give them all an _irq_handler suffix.

This is a tiny micro-optimization, but even more importantly it makes it clearer what locking we actually need. And in case someone screws this up: lockdep will catch hardirq vs. other-context deadlocks.

v2: Fix up compile fail.

Reviewed-by: Paulo Zanoni <paulo.r.zanoni@intel.com>
Reviewed-by: Ben Widawsky <ben@bwidawsk.net>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
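For context, a minimal sketch of the locking rule this patch relies on. This is not part of the patch, and all names here (example_lock, example_irq_handler, example_syscall_path) are hypothetical: plain spin_lock() is safe in hardirq context because interrupts are already disabled on the local CPU, while any other context that takes the same lock must disable interrupts itself.

#include <linux/interrupt.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

/* Hardirq context: interrupts are already off on this CPU and the
 * handler is non-reentrant, so plain spin_lock() is sufficient. */
static irqreturn_t example_irq_handler(int irq, void *arg)
{
	spin_lock(&example_lock);
	/* ... touch state shared with process context ... */
	spin_unlock(&example_lock);

	return IRQ_HANDLED;
}

/* Process context: must still keep interrupts disabled while holding
 * the lock, or example_irq_handler() could deadlock against us. */
static void example_syscall_path(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	/* ... */
	spin_unlock_irqrestore(&example_lock, flags);
}

If the plain variant were ever taken outside hardirq context while the handler also used the lock, lockdep would flag the inconsistent hardirq-safe vs. hardirq-unsafe usage; that is the safety net the commit message mentions.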
Diffstat (limited to 'drivers/gpu/drm/i915/i915_irq.c')
-rw-r--r--	drivers/gpu/drm/i915/i915_irq.c	42
1 file changed, 18 insertions(+), 24 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 69957f889432..a7c0a730a6e1 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -656,14 +656,13 @@ static void i915_hotplug_work_func(struct work_struct *work)
 	drm_kms_helper_hotplug_event(dev);
 }
 
-static void ironlake_handle_rps_change(struct drm_device *dev)
+static void ironlake_rps_change_irq_handler(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 busy_up, busy_down, max_avg, min_avg;
 	u8 new_delay;
-	unsigned long flags;
 
-	spin_lock_irqsave(&mchdev_lock, flags);
+	spin_lock(&mchdev_lock);
 
 	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
 
@@ -691,7 +690,7 @@ static void ironlake_handle_rps_change(struct drm_device *dev)
 	if (ironlake_set_drps(dev, new_delay))
 		dev_priv->ips.cur_delay = new_delay;
 
-	spin_unlock_irqrestore(&mchdev_lock, flags);
+	spin_unlock(&mchdev_lock);
 
 	return;
 }
@@ -835,18 +834,17 @@ static void ivybridge_parity_work(struct work_struct *work)
 	kfree(parity_event[1]);
 }
 
-static void ivybridge_handle_parity_error(struct drm_device *dev)
+static void ivybridge_parity_error_irq_handler(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	unsigned long flags;
 
 	if (!HAS_L3_GPU_CACHE(dev))
 		return;
 
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
+	spin_lock(&dev_priv->irq_lock);
 	dev_priv->gt_irq_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
 	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+	spin_unlock(&dev_priv->irq_lock);
 
 	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
 }
@@ -872,15 +870,13 @@ static void snb_gt_irq_handler(struct drm_device *dev,
 	}
 
 	if (gt_iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
-		ivybridge_handle_parity_error(dev);
+		ivybridge_parity_error_irq_handler(dev);
 }
 
 /* Legacy way of handling PM interrupts */
-static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
-				u32 pm_iir)
+static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv,
+				 u32 pm_iir)
 {
-	unsigned long flags;
-
 	/*
 	 * IIR bits should never already be set because IMR should
 	 * prevent an interrupt from being shown in IIR. The warning
@@ -891,11 +887,11 @@ static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
 	 * The mask bit in IMR is cleared by dev_priv->rps.work.
 	 */
 
-	spin_lock_irqsave(&dev_priv->rps.lock, flags);
+	spin_lock(&dev_priv->rps.lock);
 	dev_priv->rps.pm_iir |= pm_iir;
 	I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
 	POSTING_READ(GEN6_PMIMR);
-	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
+	spin_unlock(&dev_priv->rps.lock);
 
 	queue_work(dev_priv->wq, &dev_priv->rps.work);
 }
@@ -959,7 +955,7 @@ static void dp_aux_irq_handler(struct drm_device *dev)
 	wake_up_all(&dev_priv->gmbus_wait_queue);
 }
 
-/* Unlike gen6_queue_rps_work() from which this function is originally derived,
+/* Unlike gen6_rps_irq_handler() from which this function is originally derived,
  * we must be able to deal with other PM interrupts. This is complicated because
  * of the way in which we use the masks to defer the RPS work (which for
  * posterity is necessary because of forcewake).
@@ -967,9 +963,7 @@ static void dp_aux_irq_handler(struct drm_device *dev)
 static void hsw_pm_irq_handler(struct drm_i915_private *dev_priv,
 			       u32 pm_iir)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&dev_priv->rps.lock, flags);
+	spin_lock(&dev_priv->rps.lock);
 	dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
 	if (dev_priv->rps.pm_iir) {
 		I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
@@ -978,7 +972,7 @@ static void hsw_pm_irq_handler(struct drm_i915_private *dev_priv,
 		/* TODO: if queue_work is slow, move it out of the spinlock */
 		queue_work(dev_priv->wq, &dev_priv->rps.work);
 	}
-	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
+	spin_unlock(&dev_priv->rps.lock);
 
 	if (pm_iir & ~GEN6_PM_RPS_EVENTS) {
 		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
@@ -1060,7 +1054,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
 		gmbus_irq_handler(dev);
 
 	if (pm_iir & GEN6_PM_RPS_EVENTS)
-		gen6_queue_rps_work(dev_priv, pm_iir);
+		gen6_rps_irq_handler(dev_priv, pm_iir);
 
 	I915_WRITE(GTIIR, gt_iir);
 	I915_WRITE(GEN6_PMIIR, pm_iir);
@@ -1298,7 +1292,7 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
 		if (IS_HASWELL(dev))
 			hsw_pm_irq_handler(dev_priv, pm_iir);
 		else if (pm_iir & GEN6_PM_RPS_EVENTS)
-			gen6_queue_rps_work(dev_priv, pm_iir);
+			gen6_rps_irq_handler(dev_priv, pm_iir);
 		I915_WRITE(GEN6_PMIIR, pm_iir);
 		ret = IRQ_HANDLED;
 	}
@@ -1415,10 +1409,10 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
 	}
 
 	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
-		ironlake_handle_rps_change(dev);
+		ironlake_rps_change_irq_handler(dev);
 
 	if (IS_GEN6(dev) && pm_iir & GEN6_PM_RPS_EVENTS)
-		gen6_queue_rps_work(dev_priv, pm_iir);
+		gen6_rps_irq_handler(dev_priv, pm_iir);
 
 	I915_WRITE(GTIIR, gt_iir);
 	I915_WRITE(DEIIR, de_iir);