Diffstat (limited to 'drivers/gpu/drm/i915/i915_irq.c')
-rw-r--r--	drivers/gpu/drm/i915/i915_irq.c	279
1 file changed, 206 insertions(+), 73 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 8a77faf4927d..a03b445ceb5f 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -85,6 +85,12 @@ ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
 {
         assert_spin_locked(&dev_priv->irq_lock);
 
+        if (dev_priv->pc8.irqs_disabled) {
+                WARN(1, "IRQs disabled\n");
+                dev_priv->pc8.regsave.deimr &= ~mask;
+                return;
+        }
+
         if ((dev_priv->irq_mask & mask) != 0) {
                 dev_priv->irq_mask &= ~mask;
                 I915_WRITE(DEIMR, dev_priv->irq_mask);
@@ -97,6 +103,12 @@ ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
 {
         assert_spin_locked(&dev_priv->irq_lock);
 
+        if (dev_priv->pc8.irqs_disabled) {
+                WARN(1, "IRQs disabled\n");
+                dev_priv->pc8.regsave.deimr |= mask;
+                return;
+        }
+
         if ((dev_priv->irq_mask & mask) != mask) {
                 dev_priv->irq_mask |= mask;
                 I915_WRITE(DEIMR, dev_priv->irq_mask);
@@ -104,6 +116,85 @@ ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
         }
 }
 
+/**
+ * ilk_update_gt_irq - update GTIMR
+ * @dev_priv: driver private
+ * @interrupt_mask: mask of interrupt bits to update
+ * @enabled_irq_mask: mask of interrupt bits to enable
+ */
+static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
+                              uint32_t interrupt_mask,
+                              uint32_t enabled_irq_mask)
+{
+        assert_spin_locked(&dev_priv->irq_lock);
+
+        if (dev_priv->pc8.irqs_disabled) {
+                WARN(1, "IRQs disabled\n");
+                dev_priv->pc8.regsave.gtimr &= ~interrupt_mask;
+                dev_priv->pc8.regsave.gtimr |= (~enabled_irq_mask &
+                                                interrupt_mask);
+                return;
+        }
+
+        dev_priv->gt_irq_mask &= ~interrupt_mask;
+        dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
+        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+        POSTING_READ(GTIMR);
+}
+
+void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+{
+        ilk_update_gt_irq(dev_priv, mask, mask);
+}
+
+void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+{
+        ilk_update_gt_irq(dev_priv, mask, 0);
+}
+
+/**
+ * snb_update_pm_irq - update GEN6_PMIMR
+ * @dev_priv: driver private
+ * @interrupt_mask: mask of interrupt bits to update
+ * @enabled_irq_mask: mask of interrupt bits to enable
+ */
+static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
+                              uint32_t interrupt_mask,
+                              uint32_t enabled_irq_mask)
+{
+        uint32_t new_val;
+
+        assert_spin_locked(&dev_priv->irq_lock);
+
+        if (dev_priv->pc8.irqs_disabled) {
+                WARN(1, "IRQs disabled\n");
+                dev_priv->pc8.regsave.gen6_pmimr &= ~interrupt_mask;
+                dev_priv->pc8.regsave.gen6_pmimr |= (~enabled_irq_mask &
+                                                     interrupt_mask);
+                return;
+        }
+
+        new_val = dev_priv->pm_irq_mask;
+        new_val &= ~interrupt_mask;
+        new_val |= (~enabled_irq_mask & interrupt_mask);
+
+        if (new_val != dev_priv->pm_irq_mask) {
+                dev_priv->pm_irq_mask = new_val;
+                I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
+                POSTING_READ(GEN6_PMIMR);
+        }
+}
+
+void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+{
+        snb_update_pm_irq(dev_priv, mask, mask);
+}
+
+void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+{
+        snb_update_pm_irq(dev_priv, mask, 0);
+}
+
 static bool ivb_can_enable_err_int(struct drm_device *dev)
 {
         struct drm_i915_private *dev_priv = dev->dev_private;
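
A note on the two-mask convention used by ilk_update_gt_irq() and snb_update_pm_irq() above: IMR registers hold *mask* bits, so an enabled interrupt is a cleared bit. interrupt_mask selects which bits to touch, and enabled_irq_mask says which of those end up unmasked; passing (mask, mask) enables, (mask, 0) disables. The standalone sketch below reproduces just that bit math with hypothetical sample values (update_imr() is an illustrative stand-in, not driver API):

#include <assert.h>
#include <stdint.h>

/* Same update rule as ilk_update_gt_irq()/snb_update_pm_irq(): clear the
 * touched bits, then re-set (mask) the ones not being enabled. */
static uint32_t update_imr(uint32_t imr, uint32_t interrupt_mask,
                           uint32_t enabled_irq_mask)
{
        imr &= ~interrupt_mask;                      /* forget old state of touched bits */
        imr |= (~enabled_irq_mask & interrupt_mask); /* re-mask the disabled ones */
        return imr;
}

int main(void)
{
        uint32_t imr = 0xffffffff;       /* everything masked, as after postinstall */

        imr = update_imr(imr, 0x3, 0x3); /* "enable": clears bits 0-1 */
        assert(imr == 0xfffffffc);

        imr = update_imr(imr, 0x1, 0);   /* "disable": sets bit 0 again */
        assert(imr == 0xfffffffd);
        return 0;
}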
@@ -194,6 +285,15 @@ static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
 
         assert_spin_locked(&dev_priv->irq_lock);
 
+        if (dev_priv->pc8.irqs_disabled &&
+            (interrupt_mask & SDE_HOTPLUG_MASK_CPT)) {
+                WARN(1, "IRQs disabled\n");
+                dev_priv->pc8.regsave.sdeimr &= ~interrupt_mask;
+                dev_priv->pc8.regsave.sdeimr |= (~enabled_irq_mask &
+                                                 interrupt_mask);
+                return;
+        }
+
         I915_WRITE(SDEIMR, sdeimr);
         POSTING_READ(SDEIMR);
 }
@@ -711,17 +811,19 @@ static void gen6_pm_rps_work(struct work_struct *work)
 {
         drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
                                                     rps.work);
-        u32 pm_iir, pm_imr;
+        u32 pm_iir;
         u8 new_delay;
 
         spin_lock_irq(&dev_priv->irq_lock);
         pm_iir = dev_priv->rps.pm_iir;
         dev_priv->rps.pm_iir = 0;
-        pm_imr = I915_READ(GEN6_PMIMR);
         /* Make sure not to corrupt PMIMR state used by ringbuffer code */
-        I915_WRITE(GEN6_PMIMR, pm_imr & ~GEN6_PM_RPS_EVENTS);
+        snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
         spin_unlock_irq(&dev_priv->irq_lock);
 
+        /* Make sure we didn't queue anything we're not going to process. */
+        WARN_ON(pm_iir & ~GEN6_PM_RPS_EVENTS);
+
         if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
                 return;
 
@@ -806,8 +908,7 @@ static void ivybridge_parity_work(struct work_struct *work)
         I915_WRITE(GEN7_MISCCPCTL, misccpctl);
 
         spin_lock_irqsave(&dev_priv->irq_lock, flags);
-        dev_priv->gt_irq_mask &= ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
-        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+        ilk_enable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
         mutex_unlock(&dev_priv->dev->struct_mutex);
@@ -837,8 +938,7 @@ static void ivybridge_parity_error_irq_handler(struct drm_device *dev)
                 return;
 
         spin_lock(&dev_priv->irq_lock);
-        dev_priv->gt_irq_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
-        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+        ilk_disable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
         spin_unlock(&dev_priv->irq_lock);
 
         queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
@@ -879,29 +979,6 @@ static void snb_gt_irq_handler(struct drm_device *dev,
                 ivybridge_parity_error_irq_handler(dev);
 }
 
-/* Legacy way of handling PM interrupts */
-static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv,
-                                 u32 pm_iir)
-{
-        /*
-         * IIR bits should never already be set because IMR should
-         * prevent an interrupt from being shown in IIR. The warning
-         * displays a case where we've unsafely cleared
-         * dev_priv->rps.pm_iir. Although missing an interrupt of the same
-         * type is not a problem, it displays a problem in the logic.
-         *
-         * The mask bit in IMR is cleared by dev_priv->rps.work.
-         */
-
-        spin_lock(&dev_priv->irq_lock);
-        dev_priv->rps.pm_iir |= pm_iir;
-        I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
-        POSTING_READ(GEN6_PMIMR);
-        spin_unlock(&dev_priv->irq_lock);
-
-        queue_work(dev_priv->wq, &dev_priv->rps.work);
-}
-
 #define HPD_STORM_DETECT_PERIOD 1000
 #define HPD_STORM_THRESHOLD 5
 
@@ -968,31 +1045,28 @@ static void dp_aux_irq_handler(struct drm_device *dev)
         wake_up_all(&dev_priv->gmbus_wait_queue);
 }
 
-/* Unlike gen6_rps_irq_handler() from which this function is originally derived,
- * we must be able to deal with other PM interrupts. This is complicated because
- * of the way in which we use the masks to defer the RPS work (which for
- * posterity is necessary because of forcewake).
- */
-static void hsw_pm_irq_handler(struct drm_i915_private *dev_priv,
-                               u32 pm_iir)
+/* The RPS events need forcewake, so we add them to a work queue and mask their
+ * IMR bits until the work is done. Other interrupts can be processed without
+ * the work queue. */
+static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
 {
         if (pm_iir & GEN6_PM_RPS_EVENTS) {
                 spin_lock(&dev_priv->irq_lock);
                 dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
-                I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
-                /* never want to mask useful interrupts. (also posting read) */
-                WARN_ON(I915_READ_NOTRACE(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS);
+                snb_disable_pm_irq(dev_priv, pm_iir & GEN6_PM_RPS_EVENTS);
                 spin_unlock(&dev_priv->irq_lock);
 
                 queue_work(dev_priv->wq, &dev_priv->rps.work);
         }
 
-        if (pm_iir & PM_VEBOX_USER_INTERRUPT)
-                notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
+        if (HAS_VEBOX(dev_priv->dev)) {
+                if (pm_iir & PM_VEBOX_USER_INTERRUPT)
+                        notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
 
                 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
                         DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
                         i915_handle_error(dev_priv->dev, false);
+                }
         }
 }
 
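The handler above only latches the RPS bits and masks them; the heavy lifting happens later in gen6_pm_rps_work(), which unmasks them again once forcewake can be taken. The sketch below shows that two-half dance in isolation, with plain C globals standing in for GEN6_PMIMR and dev_priv->rps.pm_iir, the irq_lock and queue_work() machinery omitted, and an illustrative RPS_EVENTS value (none of these names are driver API):

#include <stdint.h>
#include <stdio.h>

#define RPS_EVENTS 0x00000070u /* illustrative bit group, not the real GEN6 value */

static uint32_t imr;     /* stands in for GEN6_PMIMR */
static uint32_t latched; /* stands in for dev_priv->rps.pm_iir */

/* hard-IRQ half: latch the events and mask them so they cannot re-fire
 * before the work item has consumed them */
static void irq_handler(uint32_t iir)
{
        if (iir & RPS_EVENTS) {
                latched |= iir & RPS_EVENTS;
                imr |= iir & RPS_EVENTS;   /* snb_disable_pm_irq() */
                /* queue_work(...) would go here */
        }
}

/* work-queue half: grab the latched events, unmask, then process with
 * forcewake held */
static void rps_work(void)
{
        uint32_t iir = latched;

        latched = 0;
        imr &= ~RPS_EVENTS;                /* snb_enable_pm_irq() */
        printf("processing RPS events 0x%08x, IMR now 0x%08x\n",
               (unsigned)iir, (unsigned)imr);
}

int main(void)
{
        irq_handler(0x10);
        rps_work();
        return 0;
}
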
@@ -1064,7 +1138,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
                 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
                         gmbus_irq_handler(dev);
 
-                if (pm_iir & GEN6_PM_RPS_EVENTS)
+                if (pm_iir)
                         gen6_rps_irq_handler(dev_priv, pm_iir);
 
                 I915_WRITE(GTIIR, gt_iir);
@@ -1309,6 +1383,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
         u32 de_iir, gt_iir, de_ier, sde_ier = 0;
         irqreturn_t ret = IRQ_NONE;
+        bool err_int_reenable = false;
 
         atomic_inc(&dev_priv->irq_received);
 
@@ -1337,7 +1412,9 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
          * handler. */
         if (IS_HASWELL(dev)) {
                 spin_lock(&dev_priv->irq_lock);
-                ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
+                err_int_reenable = ~dev_priv->irq_mask & DE_ERR_INT_IVB;
+                if (err_int_reenable)
+                        ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
                 spin_unlock(&dev_priv->irq_lock);
         }
 
@@ -1364,16 +1441,13 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
         if (INTEL_INFO(dev)->gen >= 6) {
                 u32 pm_iir = I915_READ(GEN6_PMIIR);
                 if (pm_iir) {
-                        if (IS_HASWELL(dev))
-                                hsw_pm_irq_handler(dev_priv, pm_iir);
-                        else if (pm_iir & GEN6_PM_RPS_EVENTS)
-                                gen6_rps_irq_handler(dev_priv, pm_iir);
+                        gen6_rps_irq_handler(dev_priv, pm_iir);
                         I915_WRITE(GEN6_PMIIR, pm_iir);
                         ret = IRQ_HANDLED;
                 }
         }
 
-        if (IS_HASWELL(dev)) {
+        if (err_int_reenable) {
                 spin_lock(&dev_priv->irq_lock);
                 if (ivb_can_enable_err_int(dev))
                         ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
@@ -1826,10 +1900,10 @@ ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
         u32 tmp;
 
         if (ring->hangcheck.acthd != acthd)
-                return active;
+                return HANGCHECK_ACTIVE;
 
         if (IS_GEN2(dev))
-                return hung;
+                return HANGCHECK_HUNG;
 
         /* Is the chip hanging on a WAIT_FOR_EVENT?
          * If so we can simply poke the RB_WAIT bit
@@ -1841,24 +1915,24 @@ ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
                 DRM_ERROR("Kicking stuck wait on %s\n",
                           ring->name);
                 I915_WRITE_CTL(ring, tmp);
-                return kick;
+                return HANGCHECK_KICK;
         }
 
         if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
                 switch (semaphore_passed(ring)) {
                 default:
-                        return hung;
+                        return HANGCHECK_HUNG;
                 case 1:
                         DRM_ERROR("Kicking stuck semaphore on %s\n",
                                   ring->name);
                         I915_WRITE_CTL(ring, tmp);
-                        return kick;
+                        return HANGCHECK_KICK;
                 case 0:
-                        return wait;
+                        return HANGCHECK_WAIT;
                 }
         }
 
-        return hung;
+        return HANGCHECK_HUNG;
 }
 
 /**
@@ -1905,8 +1979,6 @@ static void i915_hangcheck_elapsed(unsigned long data)
                         } else
                                 busy = false;
                 } else {
-                        int score;
-
                         /* We always increment the hangcheck score
                          * if the ring is busy and still processing
                          * the same request, so that no single request
@@ -1926,21 +1998,19 @@ static void i915_hangcheck_elapsed(unsigned long data)
                                                           acthd);
 
                         switch (ring->hangcheck.action) {
-                        case wait:
-                                score = 0;
+                        case HANGCHECK_WAIT:
                                 break;
-                        case active:
-                                score = BUSY;
+                        case HANGCHECK_ACTIVE:
+                                ring->hangcheck.score += BUSY;
                                 break;
-                        case kick:
-                                score = KICK;
+                        case HANGCHECK_KICK:
+                                ring->hangcheck.score += KICK;
                                 break;
-                        case hung:
-                                score = HUNG;
+                        case HANGCHECK_HUNG:
+                                ring->hangcheck.score += HUNG;
                                 stuck[i] = true;
                                 break;
                         }
-                        ring->hangcheck.score += score;
                 }
         } else {
                 /* Gradually reduce the count so that we catch DoS
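
The switch above folds the old score/increment split into direct accumulation: each hangcheck tick classifies the ring and adds a weight, and idle ticks decay the score (per the "Gradually reduce" comment that follows). A toy version of the accumulator, with made-up weights and a hypothetical FIRE threshold, since the real constants and reset path live elsewhere in the driver:

#include <stdio.h>

enum hangcheck_action { HANGCHECK_WAIT, HANGCHECK_ACTIVE,
                        HANGCHECK_KICK, HANGCHECK_HUNG };

/* illustrative weights, not necessarily the driver's */
#define BUSY 1
#define KICK 5
#define HUNG 20
#define FIRE 30 /* hypothetical score at which we would declare a hang */

static int score;

static void hangcheck_tick(enum hangcheck_action action)
{
        switch (action) {
        case HANGCHECK_WAIT:            /* waiting is fine, no penalty */
                break;
        case HANGCHECK_ACTIVE:
                score += BUSY;
                break;
        case HANGCHECK_KICK:
                score += KICK;
                break;
        case HANGCHECK_HUNG:
                score += HUNG;
                break;
        }
        if (score >= FIRE)
                printf("ring hung, would reset (score %d)\n", score);
}

int main(void)
{
        hangcheck_tick(HANGCHECK_KICK);
        hangcheck_tick(HANGCHECK_HUNG);
        hangcheck_tick(HANGCHECK_HUNG); /* 5 + 20 + 20 crosses FIRE */
        return 0;
}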
@@ -2158,8 +2228,9 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
         if (HAS_VEBOX(dev))
                 pm_irqs |= PM_VEBOX_USER_INTERRUPT;
 
+                dev_priv->pm_irq_mask = 0xffffffff;
                 I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
-                I915_WRITE(GEN6_PMIMR, 0xffffffff);
+                I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
                 I915_WRITE(GEN6_PMIER, pm_irqs);
                 POSTING_READ(GEN6_PMIER);
         }
@@ -2403,7 +2474,6 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
         u16 iir, new_iir;
         u32 pipe_stats[2];
         unsigned long irqflags;
-        int irq_received;
         int pipe;
         u16 flip_mask =
                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
@@ -2437,7 +2507,6 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
                                         DRM_DEBUG_DRIVER("pipe %c underrun\n",
                                                          pipe_name(pipe));
                                 I915_WRITE(reg, pipe_stats[pipe]);
-                                irq_received = 1;
                         }
                 }
                 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
@@ -3081,3 +3150,67 @@ void intel_hpd_init(struct drm_device *dev)
                 dev_priv->display.hpd_irq_setup(dev);
         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
+
+/* Disable interrupts so we can allow Package C8+. */
+void hsw_pc8_disable_interrupts(struct drm_device *dev)
+{
+        struct drm_i915_private *dev_priv = dev->dev_private;
+        unsigned long irqflags;
+
+        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+
+        dev_priv->pc8.regsave.deimr = I915_READ(DEIMR);
+        dev_priv->pc8.regsave.sdeimr = I915_READ(SDEIMR);
+        dev_priv->pc8.regsave.gtimr = I915_READ(GTIMR);
+        dev_priv->pc8.regsave.gtier = I915_READ(GTIER);
+        dev_priv->pc8.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR);
+
+        ironlake_disable_display_irq(dev_priv, ~DE_PCH_EVENT_IVB);
+        ibx_disable_display_interrupt(dev_priv, ~SDE_HOTPLUG_MASK_CPT);
+        ilk_disable_gt_irq(dev_priv, 0xffffffff);
+        snb_disable_pm_irq(dev_priv, 0xffffffff);
+
+        dev_priv->pc8.irqs_disabled = true;
+
+        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+}
+
+/* Restore interrupts so we can recover from Package C8+. */
+void hsw_pc8_restore_interrupts(struct drm_device *dev)
+{
+        struct drm_i915_private *dev_priv = dev->dev_private;
+        unsigned long irqflags;
+        uint32_t val, expected;
+
+        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+
+        val = I915_READ(DEIMR);
+        expected = ~DE_PCH_EVENT_IVB;
+        WARN(val != expected, "DEIMR is 0x%08x, not 0x%08x\n", val, expected);
+
+        val = I915_READ(SDEIMR) & ~SDE_HOTPLUG_MASK_CPT;
+        expected = ~SDE_HOTPLUG_MASK_CPT;
+        WARN(val != expected, "SDEIMR non-HPD bits are 0x%08x, not 0x%08x\n",
+             val, expected);
+
+        val = I915_READ(GTIMR);
+        expected = 0xffffffff;
+        WARN(val != expected, "GTIMR is 0x%08x, not 0x%08x\n", val, expected);
+
+        val = I915_READ(GEN6_PMIMR);
+        expected = 0xffffffff;
+        WARN(val != expected, "GEN6_PMIMR is 0x%08x, not 0x%08x\n", val,
+             expected);
+
+        dev_priv->pc8.irqs_disabled = false;
+
+        ironlake_enable_display_irq(dev_priv, ~dev_priv->pc8.regsave.deimr);
+        ibx_enable_display_interrupt(dev_priv,
+                                     ~dev_priv->pc8.regsave.sdeimr &
+                                     ~SDE_HOTPLUG_MASK_CPT);
+        ilk_enable_gt_irq(dev_priv, ~dev_priv->pc8.regsave.gtimr);
+        snb_enable_pm_irq(dev_priv, ~dev_priv->pc8.regsave.gen6_pmimr);
+        I915_WRITE(GTIER, dev_priv->pc8.regsave.gtier);
+
+        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+}
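
hsw_pc8_restore_interrupts() re-enables by passing the *complement* of each saved IMR snapshot: since IMR bits are mask bits, ~regsave is exactly the set of interrupts that was enabled before entering package C8+. A minimal sketch of that save/mask/restore round trip (imr_enable()/imr_disable() and regsave_imr are illustrative stand-ins, not driver or kernel API):

#include <assert.h>
#include <stdint.h>

static uint32_t imr, regsave_imr;

static void imr_enable(uint32_t bits)  { imr &= ~bits; } /* clear = unmask */
static void imr_disable(uint32_t bits) { imr |= bits; }  /* set = mask */

static void pc8_disable(void)
{
        regsave_imr = imr;       /* snapshot current mask state */
        imr_disable(0xffffffff); /* mask everything for package C8 */
}

static void pc8_restore(void)
{
        /* Re-enable exactly the bits that were unmasked before: the
         * complement of the saved IMR value. */
        imr_enable(~regsave_imr);
}

int main(void)
{
        imr = 0xfffffff0;              /* interrupts 0-3 enabled */
        pc8_disable();
        assert(imr == 0xffffffff);
        pc8_restore();
        assert(imr == 0xfffffff0);     /* original state recovered */
        return 0;
}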