author    Eric Anholt <eric@anholt.net>    2008-11-04 18:50:30 -0500
committer Dave Airlie <airlied@redhat.com>    2008-11-24 18:27:43 -0500
commit    cdfbc41f6d602fc0105fb2b4e0645cc1aa274c12 (patch)
tree      94555bbfe2161c641494a82911a96b60783bca35 /drivers/gpu/drm/i915
parent    7c463586427bbbad726ba561bae4ba5acada2481 (diff)
drm/i915: Remove IMR masking during interrupt handler, and restart it if needed.
The IMR masking was a technique recommended for avoiding getting stuck with no interrupts generated again in MSI mode. It kept new IIR bits from getting set between the IIR read and the IIR write, which would have otherwise prevented an MSI from ever getting generated again. However, this caused a problem for vblank as the IMR mask would keep the pipe event interrupt from getting reflected in IIR, even after the IMR mask was brought back down. Instead, just check the state of IIR after we ack the interrupts we're going to handle, and restart if we didn't get IIR all the way to zero.

Signed-off-by: Eric Anholt <eric@anholt.net>
Signed-off-by: Dave Airlie <airlied@redhat.com>
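To illustrate the restart pattern the message describes, here is a minimal, self-contained C sketch, not the driver code itself: read_iir(), write_iir(), and raise_interrupt() are hypothetical stand-ins for the real I915_READ(IIR)/I915_WRITE(IIR) register accessors and for the hardware raising new IIR bits, and the race is injected explicitly so the program can demonstrate it. Only the loop structure mirrors the patch.

/* Hypothetical simulation of the ack-and-restart pattern; none of these
 * helper names exist in the i915 driver. */
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_iir;	/* simulated Interrupt Identity Register */
static int simulate_race = 1;	/* inject one event in the ack->read window */

static uint32_t read_iir(void) { return fake_iir; }
static void raise_interrupt(uint32_t bit) { fake_iir |= bit; }

/* As on the real hardware, writing a 1 to an IIR bit acknowledges it. */
static void write_iir(uint32_t val) { fake_iir &= ~val; }

static int irq_handler(void)
{
	uint32_t iir = read_iir();
	uint32_t new_iir;
	int pass = 0;

	if (iir == 0)
		return 0;		/* IRQ_NONE: not our interrupt */

	do {
		write_iir(iir);		/* ack the bits we are about to handle */

		/* The race the commit describes: a new event lands after the
		 * ack but before the posting read, so IIR never drops to
		 * zero and an edge-triggered MSI would never fire again. */
		if (simulate_race) {
			simulate_race = 0;
			raise_interrupt(0x4);
		}

		new_iir = read_iir();	/* posting read catches the new bit */
		printf("pass %d: handled IIR bits 0x%08x\n", ++pass, iir);

		iir = new_iir;		/* restart instead of returning */
	} while (iir != 0);

	return 1;			/* IRQ_HANDLED */
}

int main(void)
{
	raise_interrupt(0x1);		/* initial interrupt */
	irq_handler();			/* takes two passes: 0x1, then 0x4 */
	return 0;
}

Without the iir = new_iir restart, the second event (0x4) would be left set in IIR with no fresh zero-to-nonzero transition to raise another MSI, which is exactly the stuck condition the old IMR masking tried to avoid.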
Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--    drivers/gpu/drm/i915/i915_irq.c | 108
1 file changed, 61 insertions(+), 47 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index ca3ed1833908..654d42fabec8 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -168,69 +168,83 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 {
 	struct drm_device *dev = (struct drm_device *) arg;
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	u32 iir;
-	u32 pipea_stats = 0, pipeb_stats = 0;
+	u32 iir, new_iir;
+	u32 pipea_stats, pipeb_stats;
 	int vblank = 0;
 	unsigned long irqflags;
 
-	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
 	atomic_inc(&dev_priv->irq_received);
 
-	if (dev->pdev->msi_enabled)
-		I915_WRITE(IMR, ~0);
 	iir = I915_READ(IIR);
 
-	if (iir == 0) {
-		if (dev->pdev->msi_enabled) {
-			I915_WRITE(IMR, dev_priv->irq_mask_reg);
-			(void) I915_READ(IMR);
-		}
-		spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+	if (iir == 0)
 		return IRQ_NONE;
-	}
-
-	/*
-	 * Clear the PIPE(A|B)STAT regs before the IIR
-	 */
-	if (iir & I915_DISPLAY_PIPE_A_EVENT_INTERRUPT) {
-		pipea_stats = I915_READ(PIPEASTAT);
-		I915_WRITE(PIPEASTAT, pipea_stats);
-	}
 
-	if (iir & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT) {
-		pipeb_stats = I915_READ(PIPEBSTAT);
-		I915_WRITE(PIPEBSTAT, pipeb_stats);
-	}
+	do {
+		pipea_stats = 0;
+		pipeb_stats = 0;
+		/*
+		 * Clear the PIPE(A|B)STAT regs before the IIR
+		 */
+		if (iir & I915_DISPLAY_PIPE_A_EVENT_INTERRUPT) {
+			spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+			pipea_stats = I915_READ(PIPEASTAT);
+			I915_WRITE(PIPEASTAT, pipea_stats);
+			spin_unlock_irqrestore(&dev_priv->user_irq_lock,
+					       irqflags);
+		}
 
-	I915_WRITE(IIR, iir);
-	if (dev->pdev->msi_enabled)
-		I915_WRITE(IMR, dev_priv->irq_mask_reg);
-	(void) I915_READ(IIR); /* Flush posted writes */
+		if (iir & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT) {
+			spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+			pipeb_stats = I915_READ(PIPEBSTAT);
+			I915_WRITE(PIPEBSTAT, pipeb_stats);
+			spin_unlock_irqrestore(&dev_priv->user_irq_lock,
+					       irqflags);
+		}
 
-	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+		I915_WRITE(IIR, iir);
+		new_iir = I915_READ(IIR); /* Flush posted writes */
 
-	if (dev_priv->sarea_priv)
-		dev_priv->sarea_priv->last_dispatch =
-			READ_BREADCRUMB(dev_priv);
+		if (dev_priv->sarea_priv)
+			dev_priv->sarea_priv->last_dispatch =
+				READ_BREADCRUMB(dev_priv);
 
-	if (iir & I915_USER_INTERRUPT) {
-		dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev);
-		DRM_WAKEUP(&dev_priv->irq_queue);
-	}
+		if (iir & I915_USER_INTERRUPT) {
+			dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev);
+			DRM_WAKEUP(&dev_priv->irq_queue);
+		}
 
-	if (pipea_stats & I915_VBLANK_INTERRUPT_STATUS) {
-		vblank++;
-		drm_handle_vblank(dev, 0);
-	}
+		if (pipea_stats & I915_VBLANK_INTERRUPT_STATUS) {
+			vblank++;
+			drm_handle_vblank(dev, 0);
+		}
 
-	if (pipeb_stats & I915_VBLANK_INTERRUPT_STATUS) {
-		vblank++;
-		drm_handle_vblank(dev, 1);
-	}
+		if (pipeb_stats & I915_VBLANK_INTERRUPT_STATUS) {
+			vblank++;
+			drm_handle_vblank(dev, 1);
+		}
 
-	if ((pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
-	    (iir & I915_ASLE_INTERRUPT))
-		opregion_asle_intr(dev);
+		if ((pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
+		    (iir & I915_ASLE_INTERRUPT))
+			opregion_asle_intr(dev);
+
+		/* With MSI, interrupts are only generated when iir
+		 * transitions from zero to nonzero. If another bit got
+		 * set while we were handling the existing iir bits, then
+		 * we would never get another interrupt.
+		 *
+		 * This is fine on non-MSI as well, as if we hit this path
+		 * we avoid exiting the interrupt handler only to generate
+		 * another one.
+		 *
+		 * Note that for MSI this could cause a stray interrupt report
+		 * if an interrupt landed in the time between writing IIR and
+		 * the posting read. This should be rare enough to never
+		 * trigger the 99% of 100,000 interrupts test for disabling
+		 * stray interrupts.
+		 */
+		iir = new_iir;
+	} while (iir != 0);
 
 	return IRQ_HANDLED;
 }