Diffstat (limited to 'drivers/gpu/drm/i915/i915_irq.c')
 -rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 882
 1 file changed, 529 insertions(+), 353 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 729fd0c91d7b..97f946dcc1aa 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -64,64 +64,24 @@
 #define DRM_I915_VBLANK_PIPE_ALL	(DRM_I915_VBLANK_PIPE_A | \
 					 DRM_I915_VBLANK_PIPE_B)
 
-void
-ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
-{
-	if ((dev_priv->gt_irq_mask_reg & mask) != 0) {
-		dev_priv->gt_irq_mask_reg &= ~mask;
-		I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
-		(void) I915_READ(GTIMR);
-	}
-}
-
-void
-ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
-{
-	if ((dev_priv->gt_irq_mask_reg & mask) != mask) {
-		dev_priv->gt_irq_mask_reg |= mask;
-		I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
-		(void) I915_READ(GTIMR);
-	}
-}
-
 /* For display hotplug interrupt */
 static void
 ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
 {
-	if ((dev_priv->irq_mask_reg & mask) != 0) {
-		dev_priv->irq_mask_reg &= ~mask;
-		I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
-		(void) I915_READ(DEIMR);
+	if ((dev_priv->irq_mask & mask) != 0) {
+		dev_priv->irq_mask &= ~mask;
+		I915_WRITE(DEIMR, dev_priv->irq_mask);
+		POSTING_READ(DEIMR);
 	}
 }
 
 static inline void
 ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
 {
-	if ((dev_priv->irq_mask_reg & mask) != mask) {
-		dev_priv->irq_mask_reg |= mask;
-		I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
-		(void) I915_READ(DEIMR);
-	}
-}
-
-void
-i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
-{
-	if ((dev_priv->irq_mask_reg & mask) != 0) {
-		dev_priv->irq_mask_reg &= ~mask;
-		I915_WRITE(IMR, dev_priv->irq_mask_reg);
-		(void) I915_READ(IMR);
-	}
-}
-
-void
-i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
-{
-	if ((dev_priv->irq_mask_reg & mask) != mask) {
-		dev_priv->irq_mask_reg |= mask;
-		I915_WRITE(IMR, dev_priv->irq_mask_reg);
-		(void) I915_READ(IMR);
+	if ((dev_priv->irq_mask & mask) != mask) {
+		dev_priv->irq_mask |= mask;
+		I915_WRITE(DEIMR, dev_priv->irq_mask);
+		POSTING_READ(DEIMR);
 	}
 }
 
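The hunk above collapses the per-register shadow masks into a single dev_priv->irq_mask and replaces the bare `(void) I915_READ(...)` idiom with POSTING_READ(), whose purpose is the same: flush a posted MMIO write so the interrupt mask takes effect before the function returns. A minimal userspace sketch of that masked-enable pattern, with a plain variable standing in for the DEIMR register (names and values here are illustrative, not driver API):

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t fake_deimr = 0xffffffff;	/* stand-in for DEIMR (1 = masked) */
	static uint32_t irq_mask  = 0xffffffff;		/* shadow copy, like dev_priv->irq_mask */

	static void write_reg(uint32_t *reg, uint32_t val) { *reg = val; }
	static uint32_t read_reg(const uint32_t *reg) { return *reg; } /* "posting read" */

	static void enable_display_irq(uint32_t mask)
	{
		if ((irq_mask & mask) != 0) {		/* only touch hw if still masked */
			irq_mask &= ~mask;		/* clearing a bit unmasks the source */
			write_reg(&fake_deimr, irq_mask);
			(void)read_reg(&fake_deimr);	/* flush the posted write */
		}
	}

	int main(void)
	{
		enable_display_irq(1u << 3);
		printf("DEIMR now 0x%08x\n", fake_deimr);
		return 0;
	}

Keeping a shadow copy avoids a read-modify-write of the real register on every call and lets the guard skip the MMIO entirely when the bit is already in the desired state.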
@@ -144,7 +104,7 @@ i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
 		dev_priv->pipestat[pipe] |= mask;
 		/* Enable the interrupt, clear any pending status */
 		I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
-		(void) I915_READ(reg);
+		POSTING_READ(reg);
 	}
 }
 
@@ -156,16 +116,19 @@ i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
 
 		dev_priv->pipestat[pipe] &= ~mask;
 		I915_WRITE(reg, dev_priv->pipestat[pipe]);
-		(void) I915_READ(reg);
+		POSTING_READ(reg);
 	}
 }
 
 /**
  * intel_enable_asle - enable ASLE interrupt for OpRegion
  */
-void intel_enable_asle (struct drm_device *dev)
+void intel_enable_asle(struct drm_device *dev)
 {
-	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 
 	if (HAS_PCH_SPLIT(dev))
 		ironlake_enable_display_irq(dev_priv, DE_GSE);
@@ -176,6 +139,8 @@ void intel_enable_asle (struct drm_device *dev)
 		i915_enable_pipestat(dev_priv, 0,
 				     PIPE_LEGACY_BLC_EVENT_ENABLE);
 	}
+
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
 
 /**
@@ -243,6 +208,103 @@ u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
 	return I915_READ(reg);
 }
 
+int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
+			     int *vpos, int *hpos)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	u32 vbl = 0, position = 0;
+	int vbl_start, vbl_end, htotal, vtotal;
+	bool in_vbl = true;
+	int ret = 0;
+
+	if (!i915_pipe_enabled(dev, pipe)) {
+		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
+				 "pipe %d\n", pipe);
+		return 0;
+	}
+
+	/* Get vtotal. */
+	vtotal = 1 + ((I915_READ(VTOTAL(pipe)) >> 16) & 0x1fff);
+
+	if (INTEL_INFO(dev)->gen >= 4) {
+		/* No obvious pixelcount register. Only query vertical
+		 * scanout position from Display scan line register.
+		 */
+		position = I915_READ(PIPEDSL(pipe));
+
+		/* Decode into vertical scanout position. Don't have
+		 * horizontal scanout position.
+		 */
+		*vpos = position & 0x1fff;
+		*hpos = 0;
+	} else {
+		/* Have access to pixelcount since start of frame.
+		 * We can split this into vertical and horizontal
+		 * scanout position.
+		 */
+		position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
+
+		htotal = 1 + ((I915_READ(HTOTAL(pipe)) >> 16) & 0x1fff);
+		*vpos = position / htotal;
+		*hpos = position - (*vpos * htotal);
+	}
+
+	/* Query vblank area. */
+	vbl = I915_READ(VBLANK(pipe));
+
+	/* Test position against vblank region. */
+	vbl_start = vbl & 0x1fff;
+	vbl_end = (vbl >> 16) & 0x1fff;
+
+	if ((*vpos < vbl_start) || (*vpos > vbl_end))
+		in_vbl = false;
+
+	/* Inside "upper part" of vblank area? Apply corrective offset: */
+	if (in_vbl && (*vpos >= vbl_start))
+		*vpos = *vpos - vtotal;
+
+	/* Readouts valid? */
+	if (vbl > 0)
+		ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
+
+	/* In vblank? */
+	if (in_vbl)
+		ret |= DRM_SCANOUTPOS_INVBL;
+
+	return ret;
+}
+
+int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
+			      int *max_error,
+			      struct timeval *vblank_time,
+			      unsigned flags)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc;
+
+	if (pipe < 0 || pipe >= dev_priv->num_pipe) {
+		DRM_ERROR("Invalid crtc %d\n", pipe);
+		return -EINVAL;
+	}
+
+	/* Get drm_crtc to timestamp: */
+	crtc = intel_get_crtc_for_pipe(dev, pipe);
+	if (crtc == NULL) {
+		DRM_ERROR("Invalid crtc %d\n", pipe);
+		return -EINVAL;
+	}
+
+	if (!crtc->enabled) {
+		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
+		return -EBUSY;
+	}
+
+	/* Helper routine in DRM core does all the work: */
+	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
+						     vblank_time, flags,
+						     crtc);
+}
+
 /*
  * Handle hotplug events outside the interrupt handler proper.
  */
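The pre-gen4 branch of the new i915_get_crtc_scanoutpos() recovers a 2-D beam position from a single running pixel counter: divide by htotal for the scanline, take the remainder for the pixel, then fold positions inside the vblank region into a negative "distance to the next frame". A self-contained sketch of that arithmetic with illustrative mode timings (the values are made up, not register reads):

	#include <stdio.h>

	int main(void)
	{
		unsigned int htotal = 2200;	/* pixels per scanline, incl. blanking */
		unsigned int vtotal = 1125;	/* scanlines per frame, incl. blanking */
		unsigned int vbl_start = 1080;	/* first scanline of the vblank region */
		unsigned int position = 2410000; /* pixel counter since start of frame */

		int vpos = position / htotal;		/* -> scanline */
		int hpos = position - vpos * htotal;	/* -> pixel within scanline */

		/* Inside vblank: apply the same corrective offset as the patch,
		 * so vpos counts down toward the start of the next frame. */
		if (vpos >= (int)vbl_start)
			vpos -= vtotal;

		printf("vpos=%d hpos=%d\n", vpos, hpos);	/* vpos=-30 hpos=1000 */
		return 0;
	}

This is what makes the DRM core helper drm_calc_vbltimestamp_from_scanoutpos() useful: given a signed scanout position and the mode's pixel clock, it can interpolate a precise vblank timestamp instead of relying on interrupt latency.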
@@ -297,20 +359,109 @@ static void notify_ring(struct drm_device *dev,
 			struct intel_ring_buffer *ring)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 seqno = ring->get_seqno(dev, ring);
-	ring->irq_gem_seqno = seqno;
+	u32 seqno;
+
+	if (ring->obj == NULL)
+		return;
+
+	seqno = ring->get_seqno(ring);
 	trace_i915_gem_request_complete(dev, seqno);
+
+	ring->irq_seqno = seqno;
 	wake_up_all(&ring->irq_queue);
+
 	dev_priv->hangcheck_count = 0;
 	mod_timer(&dev_priv->hangcheck_timer,
 		  jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
 }
 
+static void gen6_pm_irq_handler(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	u8 new_delay = dev_priv->cur_delay;
+	u32 pm_iir;
+
+	pm_iir = I915_READ(GEN6_PMIIR);
+	if (!pm_iir)
+		return;
+
+	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
+		if (dev_priv->cur_delay != dev_priv->max_delay)
+			new_delay = dev_priv->cur_delay + 1;
+		if (new_delay > dev_priv->max_delay)
+			new_delay = dev_priv->max_delay;
+	} else if (pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT)) {
+		if (dev_priv->cur_delay != dev_priv->min_delay)
+			new_delay = dev_priv->cur_delay - 1;
+		if (new_delay < dev_priv->min_delay) {
+			new_delay = dev_priv->min_delay;
+			I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
+				   I915_READ(GEN6_RP_INTERRUPT_LIMITS) |
+				   ((new_delay << 16) & 0x3f0000));
+		} else {
+			/* Make sure we continue to get down interrupts
+			 * until we hit the minimum frequency */
+			I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
+				   I915_READ(GEN6_RP_INTERRUPT_LIMITS) & ~0x3f0000);
+		}
+
+	}
+
+	gen6_set_rps(dev, new_delay);
+	dev_priv->cur_delay = new_delay;
+
+	I915_WRITE(GEN6_PMIIR, pm_iir);
+}
+
+static void pch_irq_handler(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	u32 pch_iir;
+
+	pch_iir = I915_READ(SDEIIR);
+
+	if (pch_iir & SDE_AUDIO_POWER_MASK)
+		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
+				 (pch_iir & SDE_AUDIO_POWER_MASK) >>
+				 SDE_AUDIO_POWER_SHIFT);
+
+	if (pch_iir & SDE_GMBUS)
+		DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");
+
+	if (pch_iir & SDE_AUDIO_HDCP_MASK)
+		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
+
+	if (pch_iir & SDE_AUDIO_TRANS_MASK)
+		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
+
+	if (pch_iir & SDE_POISON)
+		DRM_ERROR("PCH poison interrupt\n");
+
+	if (pch_iir & SDE_FDI_MASK) {
+		u32 fdia, fdib;
+
+		fdia = I915_READ(FDI_RXA_IIR);
+		fdib = I915_READ(FDI_RXB_IIR);
+		DRM_DEBUG_DRIVER("PCH FDI RX interrupt; FDI RXA IIR: 0x%08x, FDI RXB IIR: 0x%08x\n", fdia, fdib);
+	}
+
+	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
+		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
+
+	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
+		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
+
+	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
+		DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n");
+	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
+		DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
+}
+
 static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	int ret = IRQ_NONE;
-	u32 de_iir, gt_iir, de_ier, pch_iir;
+	u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
 	u32 hotplug_mask;
 	struct drm_i915_master_private *master_priv;
 	u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
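The new gen6_pm_irq_handler() steps the GPU frequency one notch per render-power (RPS) interrupt: an up-threshold event raises the delay index, a down-threshold or timeout lowers it, and both directions clamp to the [min_delay, max_delay] window. A standalone sketch of just that stepping and clamping logic (the bit names are placeholders for the GEN6_PM_RP_* sources, not real register definitions):

	#include <stdio.h>

	#define UP_THRESHOLD	(1u << 0)
	#define DOWN_THRESHOLD	(1u << 1)

	static unsigned char step(unsigned char cur, unsigned char min,
				  unsigned char max, unsigned int pm_iir)
	{
		unsigned char new_delay = cur;

		if (pm_iir & UP_THRESHOLD) {
			if (cur != max)
				new_delay = cur + 1;
			if (new_delay > max)	/* clamp to ceiling */
				new_delay = max;
		} else if (pm_iir & DOWN_THRESHOLD) {
			if (cur != min)
				new_delay = cur - 1;
			if (new_delay < min)	/* clamp to floor */
				new_delay = min;
		}
		return new_delay;
	}

	int main(void)
	{
		printf("%u\n", step(5, 1, 10, UP_THRESHOLD));	/* -> 6 */
		printf("%u\n", step(1, 1, 10, DOWN_THRESHOLD));	/* stays at 1 */
		return 0;
	}

One step per interrupt keeps the governor simple; the interrupt-limits writes in the real handler additionally stop the hardware from raising further "down" events once the floor is reached.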
@@ -321,13 +472,15 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
 	/* disable master interrupt before clearing iir  */
 	de_ier = I915_READ(DEIER);
 	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
-	(void)I915_READ(DEIER);
+	POSTING_READ(DEIER);
 
 	de_iir = I915_READ(DEIIR);
 	gt_iir = I915_READ(GTIIR);
 	pch_iir = I915_READ(SDEIIR);
+	pm_iir = I915_READ(GEN6_PMIIR);
 
-	if (de_iir == 0 && gt_iir == 0 && pch_iir == 0)
+	if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 &&
+	    (!IS_GEN6(dev) || pm_iir == 0))
 		goto done;
 
 	if (HAS_PCH_CPT(dev))
@@ -344,12 +497,12 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
 			READ_BREADCRUMB(dev_priv);
 	}
 
-	if (gt_iir & GT_PIPE_NOTIFY)
-		notify_ring(dev, &dev_priv->render_ring);
+	if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
+		notify_ring(dev, &dev_priv->ring[RCS]);
 	if (gt_iir & bsd_usr_interrupt)
-		notify_ring(dev, &dev_priv->bsd_ring);
-	if (HAS_BLT(dev) && gt_iir & GT_BLT_USER_INTERRUPT)
-		notify_ring(dev, &dev_priv->blt_ring);
+		notify_ring(dev, &dev_priv->ring[VCS]);
+	if (gt_iir & GT_BLT_USER_INTERRUPT)
+		notify_ring(dev, &dev_priv->ring[BCS]);
 
 	if (de_iir & DE_GSE)
 		intel_opregion_gse_intr(dev);
@@ -371,14 +524,20 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
 		drm_handle_vblank(dev, 1);
 
 	/* check event from PCH */
-	if ((de_iir & DE_PCH_EVENT) && (pch_iir & hotplug_mask))
-		queue_work(dev_priv->wq, &dev_priv->hotplug_work);
+	if (de_iir & DE_PCH_EVENT) {
+		if (pch_iir & hotplug_mask)
+			queue_work(dev_priv->wq, &dev_priv->hotplug_work);
+		pch_irq_handler(dev);
+	}
 
 	if (de_iir & DE_PCU_EVENT) {
 		I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
 		i915_handle_rps_change(dev);
 	}
 
+	if (IS_GEN6(dev))
+		gen6_pm_irq_handler(dev);
+
 	/* should clear PCH hotplug event before clear CPU irq */
 	I915_WRITE(SDEIIR, pch_iir);
 	I915_WRITE(GTIIR, gt_iir);
@@ -386,7 +545,7 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
 
 done:
 	I915_WRITE(DEIER, de_ier);
-	(void)I915_READ(DEIER);
+	POSTING_READ(DEIER);
 
 	return ret;
 }
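Across the handler, the shape is: gate the master enable bit in DEIER off, snapshot the identity (IIR) registers, service and acknowledge the sources, then restore DEIER — so nothing re-fires while the snapshot is being processed. A toy sketch of that discipline with plain variables standing in for the registers (MASTER_ENABLE and the register layout here are invented for illustration):

	#include <stdint.h>
	#include <stdio.h>

	#define MASTER_ENABLE (1u << 31)

	static uint32_t deier = MASTER_ENABLE | 0xff;	/* fake enable register */
	static uint32_t deiir = 0x5;			/* fake pending-sources register */

	int main(void)
	{
		uint32_t saved = deier;
		deier = saved & ~MASTER_ENABLE;	/* no new interrupts during handling */

		uint32_t pending = deiir;	/* snapshot the sources */
		printf("handling sources 0x%08x\n", pending);
		deiir &= ~pending;		/* ack exactly what we handled */

		deier = saved;			/* restore the master enable */
		return 0;
	}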
@@ -422,29 +581,23 @@ static void i915_error_work_func(struct work_struct *work)
 
 #ifdef CONFIG_DEBUG_FS
 static struct drm_i915_error_object *
-i915_error_object_create(struct drm_device *dev,
-			 struct drm_gem_object *src)
+i915_error_object_create(struct drm_i915_private *dev_priv,
+			 struct drm_i915_gem_object *src)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_error_object *dst;
-	struct drm_i915_gem_object *src_priv;
 	int page, page_count;
 	u32 reloc_offset;
 
-	if (src == NULL)
+	if (src == NULL || src->pages == NULL)
 		return NULL;
 
-	src_priv = to_intel_bo(src);
-	if (src_priv->pages == NULL)
-		return NULL;
-
-	page_count = src->size / PAGE_SIZE;
+	page_count = src->base.size / PAGE_SIZE;
 
 	dst = kmalloc(sizeof(*dst) + page_count * sizeof (u32 *), GFP_ATOMIC);
 	if (dst == NULL)
 		return NULL;
 
-	reloc_offset = src_priv->gtt_offset;
+	reloc_offset = src->gtt_offset;
 	for (page = 0; page < page_count; page++) {
 		unsigned long flags;
 		void __iomem *s;
@@ -466,7 +619,7 @@ i915_error_object_create(struct drm_device *dev,
 		reloc_offset += PAGE_SIZE;
 	}
 	dst->page_count = page_count;
-	dst->gtt_offset = src_priv->gtt_offset;
+	dst->gtt_offset = src->gtt_offset;
 
 	return dst;
 
@@ -503,53 +656,98 @@ i915_error_state_free(struct drm_device *dev,
 	kfree(error);
 }
 
-static u32
-i915_get_bbaddr(struct drm_device *dev, u32 *ring)
+static u32 capture_bo_list(struct drm_i915_error_buffer *err,
+			   int count,
+			   struct list_head *head)
 {
-	u32 cmd;
+	struct drm_i915_gem_object *obj;
+	int i = 0;
+
+	list_for_each_entry(obj, head, mm_list) {
+		err->size = obj->base.size;
+		err->name = obj->base.name;
+		err->seqno = obj->last_rendering_seqno;
+		err->gtt_offset = obj->gtt_offset;
+		err->read_domains = obj->base.read_domains;
+		err->write_domain = obj->base.write_domain;
+		err->fence_reg = obj->fence_reg;
+		err->pinned = 0;
+		if (obj->pin_count > 0)
+			err->pinned = 1;
+		if (obj->user_pin_count > 0)
+			err->pinned = -1;
+		err->tiling = obj->tiling_mode;
+		err->dirty = obj->dirty;
+		err->purgeable = obj->madv != I915_MADV_WILLNEED;
+		err->ring = obj->ring ? obj->ring->id : 0;
+		err->agp_type = obj->agp_type == AGP_USER_CACHED_MEMORY;
+
+		if (++i == count)
+			break;
 
-	if (IS_I830(dev) || IS_845G(dev))
-		cmd = MI_BATCH_BUFFER;
-	else if (INTEL_INFO(dev)->gen >= 4)
-		cmd = (MI_BATCH_BUFFER_START | (2 << 6) |
-		       MI_BATCH_NON_SECURE_I965);
-	else
-		cmd = (MI_BATCH_BUFFER_START | (2 << 6));
+		err++;
+	}
 
-	return ring[0] == cmd ? ring[1] : 0;
+	return i;
 }
 
-static u32
-i915_ringbuffer_last_batch(struct drm_device *dev)
+static void i915_gem_record_fences(struct drm_device *dev,
+				   struct drm_i915_error_state *error)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 head, bbaddr;
-	u32 *ring;
+	int i;
+
+	/* Fences */
+	switch (INTEL_INFO(dev)->gen) {
+	case 6:
+		for (i = 0; i < 16; i++)
+			error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
+		break;
+	case 5:
+	case 4:
+		for (i = 0; i < 16; i++)
+			error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
+		break;
+	case 3:
+		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+			for (i = 0; i < 8; i++)
+				error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
+	case 2:
+		for (i = 0; i < 8; i++)
+			error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
+		break;
 
-	/* Locate the current position in the ringbuffer and walk back
-	 * to find the most recently dispatched batch buffer.
-	 */
-	bbaddr = 0;
-	head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-	ring = (u32 *)(dev_priv->render_ring.virtual_start + head);
-
-	while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) {
-		bbaddr = i915_get_bbaddr(dev, ring);
-		if (bbaddr)
-			break;
 	}
+}
 
-	if (bbaddr == 0) {
-		ring = (u32 *)(dev_priv->render_ring.virtual_start
-				+ dev_priv->render_ring.size);
-		while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) {
-			bbaddr = i915_get_bbaddr(dev, ring);
-			if (bbaddr)
-				break;
-		}
+static struct drm_i915_error_object *
+i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
+			     struct intel_ring_buffer *ring)
+{
+	struct drm_i915_gem_object *obj;
+	u32 seqno;
+
+	if (!ring->get_seqno)
+		return NULL;
+
+	seqno = ring->get_seqno(ring);
+	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
+		if (obj->ring != ring)
+			continue;
+
+		if (i915_seqno_passed(seqno, obj->last_rendering_seqno))
+			continue;
+
+		if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
+			continue;
+
+		/* We need to copy these to an anonymous buffer as the simplest
+		 * method to avoid being overwritten by userspace.
+		 */
+		return i915_error_object_create(dev_priv, obj);
 	}
 
-	return bbaddr;
+	return NULL;
 }
 
 /**
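i915_error_first_batchbuffer() replaces the old "walk the ring backwards looking for MI_BATCH_BUFFER_START" heuristic with a seqno comparison: any active object whose last_rendering_seqno has not yet been passed by the ring is still outstanding. The comparison has to survive 32-bit wraparound, which the driver does with modular (signed-difference) arithmetic. A runnable sketch of the same idea as the driver's i915_seqno_passed():

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* True when seq1 is at or after seq2, modulo 2^32. */
	static bool seqno_passed(uint32_t seq1, uint32_t seq2)
	{
		return (int32_t)(seq1 - seq2) >= 0;
	}

	int main(void)
	{
		printf("%d\n", seqno_passed(10, 5));		/* 1 */
		printf("%d\n", seqno_passed(3, 0xfffffff0u));	/* 1: counter wrapped */
		printf("%d\n", seqno_passed(0xfffffff0u, 3));	/* 0 */
		return 0;
	}

The subtraction-and-sign test is the standard trick for monotonically increasing counters that wrap: it stays correct as long as the two values are within 2^31 of each other.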
@@ -564,12 +762,10 @@ i915_ringbuffer_last_batch(struct drm_device *dev)
 static void i915_capture_error_state(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv;
+	struct drm_i915_gem_object *obj;
 	struct drm_i915_error_state *error;
-	struct drm_gem_object *batchbuffer[2];
 	unsigned long flags;
-	u32 bbaddr;
-	int count;
+	int i;
 
 	spin_lock_irqsave(&dev_priv->error_lock, flags);
 	error = dev_priv->first_error;
@@ -585,20 +781,33 @@ static void i915_capture_error_state(struct drm_device *dev)
 
 	DRM_DEBUG_DRIVER("generating error event\n");
 
-	error->seqno =
-		dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring);
+	error->seqno = dev_priv->ring[RCS].get_seqno(&dev_priv->ring[RCS]);
 	error->eir = I915_READ(EIR);
 	error->pgtbl_er = I915_READ(PGTBL_ER);
 	error->pipeastat = I915_READ(PIPEASTAT);
 	error->pipebstat = I915_READ(PIPEBSTAT);
 	error->instpm = I915_READ(INSTPM);
-	if (INTEL_INFO(dev)->gen < 4) {
-		error->ipeir = I915_READ(IPEIR);
-		error->ipehr = I915_READ(IPEHR);
-		error->instdone = I915_READ(INSTDONE);
-		error->acthd = I915_READ(ACTHD);
-		error->bbaddr = 0;
-	} else {
+	error->error = 0;
+	if (INTEL_INFO(dev)->gen >= 6) {
+		error->error = I915_READ(ERROR_GEN6);
+
+		error->bcs_acthd = I915_READ(BCS_ACTHD);
+		error->bcs_ipehr = I915_READ(BCS_IPEHR);
+		error->bcs_ipeir = I915_READ(BCS_IPEIR);
+		error->bcs_instdone = I915_READ(BCS_INSTDONE);
+		error->bcs_seqno = 0;
+		if (dev_priv->ring[BCS].get_seqno)
+			error->bcs_seqno = dev_priv->ring[BCS].get_seqno(&dev_priv->ring[BCS]);
+
+		error->vcs_acthd = I915_READ(VCS_ACTHD);
+		error->vcs_ipehr = I915_READ(VCS_IPEHR);
+		error->vcs_ipeir = I915_READ(VCS_IPEIR);
+		error->vcs_instdone = I915_READ(VCS_INSTDONE);
+		error->vcs_seqno = 0;
+		if (dev_priv->ring[VCS].get_seqno)
+			error->vcs_seqno = dev_priv->ring[VCS].get_seqno(&dev_priv->ring[VCS]);
+	}
+	if (INTEL_INFO(dev)->gen >= 4) {
 		error->ipeir = I915_READ(IPEIR_I965);
 		error->ipehr = I915_READ(IPEHR_I965);
 		error->instdone = I915_READ(INSTDONE_I965);
@@ -606,118 +815,63 @@ static void i915_capture_error_state(struct drm_device *dev)
 		error->instdone1 = I915_READ(INSTDONE1);
 		error->acthd = I915_READ(ACTHD_I965);
 		error->bbaddr = I915_READ64(BB_ADDR);
+	} else {
+		error->ipeir = I915_READ(IPEIR);
+		error->ipehr = I915_READ(IPEHR);
+		error->instdone = I915_READ(INSTDONE);
+		error->acthd = I915_READ(ACTHD);
+		error->bbaddr = 0;
 	}
+	i915_gem_record_fences(dev, error);
 
-	bbaddr = i915_ringbuffer_last_batch(dev);
-
-	/* Grab the current batchbuffer, most likely to have crashed. */
-	batchbuffer[0] = NULL;
-	batchbuffer[1] = NULL;
-	count = 0;
-	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
-		struct drm_gem_object *obj = &obj_priv->base;
-
-		if (batchbuffer[0] == NULL &&
-		    bbaddr >= obj_priv->gtt_offset &&
-		    bbaddr < obj_priv->gtt_offset + obj->size)
-			batchbuffer[0] = obj;
-
-		if (batchbuffer[1] == NULL &&
-		    error->acthd >= obj_priv->gtt_offset &&
-		    error->acthd < obj_priv->gtt_offset + obj->size)
-			batchbuffer[1] = obj;
-
-		count++;
-	}
-	/* Scan the other lists for completeness for those bizarre errors. */
-	if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
-		list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) {
-			struct drm_gem_object *obj = &obj_priv->base;
-
-			if (batchbuffer[0] == NULL &&
-			    bbaddr >= obj_priv->gtt_offset &&
-			    bbaddr < obj_priv->gtt_offset + obj->size)
-				batchbuffer[0] = obj;
-
-			if (batchbuffer[1] == NULL &&
-			    error->acthd >= obj_priv->gtt_offset &&
-			    error->acthd < obj_priv->gtt_offset + obj->size)
-				batchbuffer[1] = obj;
-
-			if (batchbuffer[0] && batchbuffer[1])
-				break;
-		}
-	}
-	if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
-		list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) {
-			struct drm_gem_object *obj = &obj_priv->base;
-
-			if (batchbuffer[0] == NULL &&
-			    bbaddr >= obj_priv->gtt_offset &&
-			    bbaddr < obj_priv->gtt_offset + obj->size)
-				batchbuffer[0] = obj;
-
-			if (batchbuffer[1] == NULL &&
-			    error->acthd >= obj_priv->gtt_offset &&
-			    error->acthd < obj_priv->gtt_offset + obj->size)
-				batchbuffer[1] = obj;
-
-			if (batchbuffer[0] && batchbuffer[1])
-				break;
-		}
-	}
-
-	/* We need to copy these to an anonymous buffer as the simplest
-	 * method to avoid being overwritten by userspace.
-	 */
-	error->batchbuffer[0] = i915_error_object_create(dev, batchbuffer[0]);
-	if (batchbuffer[1] != batchbuffer[0])
-		error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]);
-	else
-		error->batchbuffer[1] = NULL;
+	/* Record the active batchbuffers */
+	for (i = 0; i < I915_NUM_RINGS; i++)
+		error->batchbuffer[i] =
+			i915_error_first_batchbuffer(dev_priv,
+						     &dev_priv->ring[i]);
 
 	/* Record the ringbuffer */
-	error->ringbuffer = i915_error_object_create(dev,
-						     dev_priv->render_ring.gem_object);
+	error->ringbuffer = i915_error_object_create(dev_priv,
+						     dev_priv->ring[RCS].obj);
 
-	/* Record buffers on the active list. */
+	/* Record buffers on the active and pinned lists. */
 	error->active_bo = NULL;
-	error->active_bo_count = 0;
+	error->pinned_bo = NULL;
 
-	if (count)
-		error->active_bo = kmalloc(sizeof(*error->active_bo)*count,
-					   GFP_ATOMIC);
+	i = 0;
+	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
+		i++;
+	error->active_bo_count = i;
+	list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
+		i++;
+	error->pinned_bo_count = i - error->active_bo_count;
 
-	if (error->active_bo) {
-		int i = 0;
-		list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
-			struct drm_gem_object *obj = &obj_priv->base;
-
-			error->active_bo[i].size = obj->size;
-			error->active_bo[i].name = obj->name;
-			error->active_bo[i].seqno = obj_priv->last_rendering_seqno;
-			error->active_bo[i].gtt_offset = obj_priv->gtt_offset;
-			error->active_bo[i].read_domains = obj->read_domains;
-			error->active_bo[i].write_domain = obj->write_domain;
-			error->active_bo[i].fence_reg = obj_priv->fence_reg;
-			error->active_bo[i].pinned = 0;
-			if (obj_priv->pin_count > 0)
-				error->active_bo[i].pinned = 1;
-			if (obj_priv->user_pin_count > 0)
-				error->active_bo[i].pinned = -1;
-			error->active_bo[i].tiling = obj_priv->tiling_mode;
-			error->active_bo[i].dirty = obj_priv->dirty;
-			error->active_bo[i].purgeable = obj_priv->madv != I915_MADV_WILLNEED;
-
-			if (++i == count)
-				break;
-		}
-		error->active_bo_count = i;
+	error->active_bo = NULL;
+	error->pinned_bo = NULL;
+	if (i) {
+		error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
+					   GFP_ATOMIC);
+		if (error->active_bo)
+			error->pinned_bo =
+				error->active_bo + error->active_bo_count;
 	}
 
+	if (error->active_bo)
+		error->active_bo_count =
+			capture_bo_list(error->active_bo,
+					error->active_bo_count,
+					&dev_priv->mm.active_list);
+
+	if (error->pinned_bo)
+		error->pinned_bo_count =
+			capture_bo_list(error->pinned_bo,
+					error->pinned_bo_count,
+					&dev_priv->mm.pinned_list);
+
 	do_gettimeofday(&error->time);
 
 	error->overlay = intel_overlay_capture_error_state(dev);
+	error->display = intel_display_capture_error_state(dev);
 
 	spin_lock_irqsave(&dev_priv->error_lock, flags);
 	if (dev_priv->first_error == NULL) {
@@ -775,7 +929,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
 			printk(KERN_ERR "  ACTHD: 0x%08x\n",
 			       I915_READ(ACTHD_I965));
 			I915_WRITE(IPEIR_I965, ipeir);
-			(void)I915_READ(IPEIR_I965);
+			POSTING_READ(IPEIR_I965);
 		}
 		if (eir & GM45_ERROR_PAGE_TABLE) {
 			u32 pgtbl_err = I915_READ(PGTBL_ER);
@@ -783,7 +937,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
 			printk(KERN_ERR "  PGTBL_ER: 0x%08x\n",
 			       pgtbl_err);
 			I915_WRITE(PGTBL_ER, pgtbl_err);
-			(void)I915_READ(PGTBL_ER);
+			POSTING_READ(PGTBL_ER);
 		}
 	}
 
@@ -794,7 +948,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
 			printk(KERN_ERR "  PGTBL_ER: 0x%08x\n",
 			       pgtbl_err);
 			I915_WRITE(PGTBL_ER, pgtbl_err);
-			(void)I915_READ(PGTBL_ER);
+			POSTING_READ(PGTBL_ER);
 		}
 	}
 
@@ -825,7 +979,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
 			printk(KERN_ERR "  ACTHD: 0x%08x\n",
 			       I915_READ(ACTHD));
 			I915_WRITE(IPEIR, ipeir);
-			(void)I915_READ(IPEIR);
+			POSTING_READ(IPEIR);
 		} else {
 			u32 ipeir = I915_READ(IPEIR_I965);
 
@@ -842,12 +996,12 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
 			printk(KERN_ERR "  ACTHD: 0x%08x\n",
 			       I915_READ(ACTHD_I965));
 			I915_WRITE(IPEIR_I965, ipeir);
-			(void)I915_READ(IPEIR_I965);
+			POSTING_READ(IPEIR_I965);
 		}
 	}
 
 	I915_WRITE(EIR, eir);
-	(void)I915_READ(EIR);
+	POSTING_READ(EIR);
 	eir = I915_READ(EIR);
 	if (eir) {
 		/*
@@ -870,7 +1024,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
  * so userspace knows something bad happened (should trigger collection
  * of a ring dump etc.).
  */
-static void i915_handle_error(struct drm_device *dev, bool wedged)
+void i915_handle_error(struct drm_device *dev, bool wedged)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
@@ -884,11 +1038,11 @@ static void i915_handle_error(struct drm_device *dev, bool wedged)
 		/*
 		 * Wakeup waiting processes so they don't hang
 		 */
-		wake_up_all(&dev_priv->render_ring.irq_queue);
+		wake_up_all(&dev_priv->ring[RCS].irq_queue);
 		if (HAS_BSD(dev))
-			wake_up_all(&dev_priv->bsd_ring.irq_queue);
+			wake_up_all(&dev_priv->ring[VCS].irq_queue);
 		if (HAS_BLT(dev))
-			wake_up_all(&dev_priv->blt_ring.irq_queue);
+			wake_up_all(&dev_priv->ring[BCS].irq_queue);
 	}
 
 	queue_work(dev_priv->wq, &dev_priv->error_work);
@@ -899,7 +1053,7 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct drm_i915_gem_object *obj_priv;
+	struct drm_i915_gem_object *obj;
 	struct intel_unpin_work *work;
 	unsigned long flags;
 	bool stall_detected;
@@ -918,13 +1072,13 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
 	}
 
 	/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
-	obj_priv = to_intel_bo(work->pending_flip_obj);
+	obj = work->pending_flip_obj;
 	if (INTEL_INFO(dev)->gen >= 4) {
 		int dspsurf = intel_crtc->plane == 0 ? DSPASURF : DSPBSURF;
-		stall_detected = I915_READ(dspsurf) == obj_priv->gtt_offset;
+		stall_detected = I915_READ(dspsurf) == obj->gtt_offset;
 	} else {
 		int dspaddr = intel_crtc->plane == 0 ? DSPAADDR : DSPBADDR;
-		stall_detected = I915_READ(dspaddr) == (obj_priv->gtt_offset +
+		stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
 							crtc->y * crtc->fb->pitch +
 							crtc->x * crtc->fb->bits_per_pixel/8);
 	}
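The pre-gen4 branch of the stall check compares the display base register against a computed byte address rather than a surface base, because older hardware scans out from base + panning offset. The offset arithmetic is just `y * pitch + x * bytes_per_pixel`; a quick sketch with illustrative values:

	#include <stdio.h>

	int main(void)
	{
		unsigned int gtt_offset = 0x100000;	/* base of the scanout buffer */
		unsigned int pitch = 7680;		/* bytes per scanline (1920 * 4) */
		unsigned int bpp = 32;
		unsigned int x = 100, y = 50;		/* crtc panning offset */

		unsigned int dspaddr = gtt_offset + y * pitch + x * bpp / 8;
		printf("expected DSPADDR: 0x%08x\n", dspaddr);
		return 0;
	}

If the register already holds this value, the flip has landed in hardware and only the completion interrupt was missed, so the driver can safely finish the flip itself.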
@@ -970,7 +1124,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 	 * It doesn't set the bit in iir again, but it still produces
 	 * interrupts (for non-MSI).
 	 */
-	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 	pipea_stats = I915_READ(PIPEASTAT);
 	pipeb_stats = I915_READ(PIPEBSTAT);
 
@@ -993,7 +1147,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 			I915_WRITE(PIPEBSTAT, pipeb_stats);
 			irq_received = 1;
 		}
-		spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
 		if (!irq_received)
 			break;
@@ -1026,9 +1180,9 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 		}
 
 		if (iir & I915_USER_INTERRUPT)
-			notify_ring(dev, &dev_priv->render_ring);
-		if (HAS_BSD(dev) && (iir & I915_BSD_USER_INTERRUPT))
-			notify_ring(dev, &dev_priv->bsd_ring);
+			notify_ring(dev, &dev_priv->ring[RCS]);
+		if (iir & I915_BSD_USER_INTERRUPT)
+			notify_ring(dev, &dev_priv->ring[VCS]);
 
 		if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
 			intel_prepare_page_flip(dev, 0);
@@ -1042,18 +1196,18 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 			intel_finish_page_flip_plane(dev, 1);
 		}
 
-		if (pipea_stats & vblank_status) {
+		if (pipea_stats & vblank_status &&
+		    drm_handle_vblank(dev, 0)) {
 			vblank++;
-			drm_handle_vblank(dev, 0);
 			if (!dev_priv->flip_pending_is_done) {
 				i915_pageflip_stall_check(dev, 0);
 				intel_finish_page_flip(dev, 0);
 			}
 		}
 
-		if (pipeb_stats & vblank_status) {
+		if (pipeb_stats & vblank_status &&
+		    drm_handle_vblank(dev, 1)) {
 			vblank++;
-			drm_handle_vblank(dev, 1);
 			if (!dev_priv->flip_pending_is_done) {
 				i915_pageflip_stall_check(dev, 1);
 				intel_finish_page_flip(dev, 1);
@@ -1101,12 +1255,13 @@ static int i915_emit_irq(struct drm_device * dev)
 	if (master_priv->sarea_priv)
 		master_priv->sarea_priv->last_enqueue = dev_priv->counter;
 
-	BEGIN_LP_RING(4);
-	OUT_RING(MI_STORE_DWORD_INDEX);
-	OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	OUT_RING(dev_priv->counter);
-	OUT_RING(MI_USER_INTERRUPT);
-	ADVANCE_LP_RING();
+	if (BEGIN_LP_RING(4) == 0) {
+		OUT_RING(MI_STORE_DWORD_INDEX);
+		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+		OUT_RING(dev_priv->counter);
+		OUT_RING(MI_USER_INTERRUPT);
+		ADVANCE_LP_RING();
+	}
 
 	return dev_priv->counter;
 }
@@ -1114,12 +1269,11 @@ static int i915_emit_irq(struct drm_device * dev)
 void i915_trace_irq_get(struct drm_device *dev, u32 seqno)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
-
-	if (dev_priv->trace_irq_seqno == 0)
-		render_ring->user_irq_get(dev, render_ring);
+	struct intel_ring_buffer *ring = LP_RING(dev_priv);
 
-	dev_priv->trace_irq_seqno = seqno;
+	if (dev_priv->trace_irq_seqno == 0 &&
+	    ring->irq_get(ring))
+		dev_priv->trace_irq_seqno = seqno;
 }
 
 static int i915_wait_irq(struct drm_device * dev, int irq_nr)
@@ -1127,7 +1281,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
 	int ret = 0;
-	struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
+	struct intel_ring_buffer *ring = LP_RING(dev_priv);
 
 	DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
 		  READ_BREADCRUMB(dev_priv));
@@ -1141,10 +1295,12 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
 	if (master_priv->sarea_priv)
 		master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
 
-	render_ring->user_irq_get(dev, render_ring);
-	DRM_WAIT_ON(ret, dev_priv->render_ring.irq_queue, 3 * DRM_HZ,
-		    READ_BREADCRUMB(dev_priv) >= irq_nr);
-	render_ring->user_irq_put(dev, render_ring);
+	if (ring->irq_get(ring)) {
+		DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
+			    READ_BREADCRUMB(dev_priv) >= irq_nr);
+		ring->irq_put(ring);
+	} else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
+		ret = -EBUSY;
 
 	if (ret == -EBUSY) {
 		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
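Note the new structure: ring->irq_get() can now fail, and when it does the wait falls back to polling the breadcrumb with a timeout instead of sleeping on the interrupt queue. A userspace sketch of that wait_for()-style fallback loop, using a monotonic clock (the breadcrumb variable and helper names are invented for the example):

	#include <stdio.h>
	#include <time.h>

	static long now_ms(void)
	{
		struct timespec ts;
		clock_gettime(CLOCK_MONOTONIC, &ts);
		return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
	}

	/* Poll until the breadcrumb reaches 'want' or the timeout expires;
	 * returns -1 on timeout, like -EBUSY in the driver. */
	static int wait_for_breadcrumb(volatile unsigned int *breadcrumb,
				       unsigned int want, long timeout_ms)
	{
		long deadline = now_ms() + timeout_ms;

		while (*breadcrumb < want) {
			if (now_ms() > deadline)
				return -1;
		}
		return 0;
	}

	int main(void)
	{
		unsigned int breadcrumb = 5;
		printf("%d\n", wait_for_breadcrumb(&breadcrumb, 3, 100));	/* 0 */
		printf("%d\n", wait_for_breadcrumb(&breadcrumb, 9, 100));	/* -1 */
		return 0;
	}

Polling burns CPU, but it keeps the legacy ioctl working even when the ring cannot supply a user interrupt.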
@@ -1163,7 +1319,7 @@ int i915_irq_emit(struct drm_device *dev, void *data,
 	drm_i915_irq_emit_t *emit = data;
 	int result;
 
-	if (!dev_priv || !dev_priv->render_ring.virtual_start) {
+	if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
 		DRM_ERROR("called with no initialization\n");
 		return -EINVAL;
 	}
@@ -1209,9 +1365,9 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
 	if (!i915_pipe_enabled(dev, pipe))
 		return -EINVAL;
 
-	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 	if (HAS_PCH_SPLIT(dev))
 		ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
 					    DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
 	else if (INTEL_INFO(dev)->gen >= 4)
 		i915_enable_pipestat(dev_priv, pipe,
@@ -1219,7 +1375,7 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
 	else
 		i915_enable_pipestat(dev_priv, pipe,
 				     PIPE_VBLANK_INTERRUPT_ENABLE);
-	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 	return 0;
 }
 
@@ -1231,15 +1387,15 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	unsigned long irqflags;
 
-	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 	if (HAS_PCH_SPLIT(dev))
 		ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
 					     DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
 	else
 		i915_disable_pipestat(dev_priv, pipe,
 				      PIPE_VBLANK_INTERRUPT_ENABLE |
 				      PIPE_START_VBLANK_INTERRUPT_ENABLE);
-	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
 
 void i915_enable_interrupt (struct drm_device *dev)
@@ -1306,12 +1462,50 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
 	return -EINVAL;
 }
 
-static struct drm_i915_gem_request *
-i915_get_tail_request(struct drm_device *dev)
+static u32
+ring_last_seqno(struct intel_ring_buffer *ring)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	return list_entry(dev_priv->render_ring.request_list.prev,
-			  struct drm_i915_gem_request, list);
+	return list_entry(ring->request_list.prev,
+			  struct drm_i915_gem_request, list)->seqno;
+}
+
+static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
+{
+	if (list_empty(&ring->request_list) ||
+	    i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) {
+		/* Issue a wake-up to catch stuck h/w. */
+		if (ring->waiting_seqno && waitqueue_active(&ring->irq_queue)) {
+			DRM_ERROR("Hangcheck timer elapsed... %s idle [waiting on %d, at %d], missed IRQ?\n",
+				  ring->name,
+				  ring->waiting_seqno,
+				  ring->get_seqno(ring));
+			wake_up_all(&ring->irq_queue);
+			*err = true;
+		}
+		return true;
+	}
+	return false;
+}
+
+static bool kick_ring(struct intel_ring_buffer *ring)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 tmp = I915_READ_CTL(ring);
+	if (tmp & RING_WAIT) {
+		DRM_ERROR("Kicking stuck wait on %s\n",
+			  ring->name);
+		I915_WRITE_CTL(ring, tmp);
+		return true;
+	}
+	if (IS_GEN6(dev) &&
+	    (tmp & RING_WAIT_SEMAPHORE)) {
+		DRM_ERROR("Kicking stuck semaphore on %s\n",
+			  ring->name);
+		I915_WRITE_CTL(ring, tmp);
+		return true;
+	}
+	return false;
 }
 
 /**
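i915_hangcheck_ring_idle() declares a ring idle when it has no outstanding requests, i.e. the hardware seqno has caught up with the last queued request; the hangcheck timer only stands down when this holds for all three rings. A small sketch of that all-rings check (the struct and field names are stand-ins, not the driver's types):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	struct ring {
		const char *name;
		uint32_t hw_seqno;	/* what the GPU has completed */
		uint32_t last_seqno;	/* last request queued to this ring */
		bool has_requests;
	};

	/* Idle: nothing queued, or hw_seqno has passed the tail request
	 * (modular comparison, as with i915_seqno_passed). */
	static bool ring_idle(const struct ring *ring)
	{
		return !ring->has_requests ||
		       (int32_t)(ring->hw_seqno - ring->last_seqno) >= 0;
	}

	int main(void)
	{
		struct ring rings[3] = {
			{ "render", 100, 100, true  },
			{ "bsd",      7,   7, true  },
			{ "blt",      0,   0, false },
		};
		bool all_idle = ring_idle(&rings[0]) &&
				ring_idle(&rings[1]) &&
				ring_idle(&rings[2]);

		printf("GPU idle: %s\n", all_idle ? "yes" : "no");
		return 0;
	}

The *err out-parameter in the real function flags the "idle but someone is still sleeping on the IRQ queue" case — a missed interrupt — so the caller re-arms the timer via the new `repeat:` label instead of returning.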
@@ -1325,6 +1519,17 @@ void i915_hangcheck_elapsed(unsigned long data)
 	struct drm_device *dev = (struct drm_device *)data;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	uint32_t acthd, instdone, instdone1;
+	bool err = false;
+
+	/* If all work is done then ACTHD clearly hasn't advanced. */
+	if (i915_hangcheck_ring_idle(&dev_priv->ring[RCS], &err) &&
+	    i915_hangcheck_ring_idle(&dev_priv->ring[VCS], &err) &&
+	    i915_hangcheck_ring_idle(&dev_priv->ring[BCS], &err)) {
+		dev_priv->hangcheck_count = 0;
+		if (err)
+			goto repeat;
+		return;
+	}
 
 	if (INTEL_INFO(dev)->gen < 4) {
 		acthd = I915_READ(ACTHD);
@@ -1336,38 +1541,6 @@ void i915_hangcheck_elapsed(unsigned long data)
 		instdone1 = I915_READ(INSTDONE1);
 	}
 
-	/* If all work is done then ACTHD clearly hasn't advanced. */
-	if (list_empty(&dev_priv->render_ring.request_list) ||
-	    i915_seqno_passed(dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring),
-			      i915_get_tail_request(dev)->seqno)) {
-		bool missed_wakeup = false;
-
-		dev_priv->hangcheck_count = 0;
-
-		/* Issue a wake-up to catch stuck h/w. */
-		if (dev_priv->render_ring.waiting_gem_seqno &&
-		    waitqueue_active(&dev_priv->render_ring.irq_queue)) {
-			wake_up_all(&dev_priv->render_ring.irq_queue);
-			missed_wakeup = true;
-		}
-
-		if (dev_priv->bsd_ring.waiting_gem_seqno &&
-		    waitqueue_active(&dev_priv->bsd_ring.irq_queue)) {
-			wake_up_all(&dev_priv->bsd_ring.irq_queue);
-			missed_wakeup = true;
-		}
-
-		if (dev_priv->blt_ring.waiting_gem_seqno &&
-		    waitqueue_active(&dev_priv->blt_ring.irq_queue)) {
-			wake_up_all(&dev_priv->blt_ring.irq_queue);
-			missed_wakeup = true;
-		}
-
-		if (missed_wakeup)
-			DRM_ERROR("Hangcheck timer elapsed... GPU idle, missed IRQ.\n");
-		return;
-	}
-
 	if (dev_priv->last_acthd == acthd &&
 	    dev_priv->last_instdone == instdone &&
 	    dev_priv->last_instdone1 == instdone1) {
@@ -1380,12 +1553,17 @@ void i915_hangcheck_elapsed(unsigned long data)
 			 * and break the hang. This should work on
 			 * all but the second generation chipsets.
 			 */
-			u32 tmp = I915_READ(PRB0_CTL);
-			if (tmp & RING_WAIT) {
-				I915_WRITE(PRB0_CTL, tmp);
-				POSTING_READ(PRB0_CTL);
-				goto out;
-			}
+
+			if (kick_ring(&dev_priv->ring[RCS]))
+				goto repeat;
+
+			if (HAS_BSD(dev) &&
+			    kick_ring(&dev_priv->ring[VCS]))
+				goto repeat;
+
+			if (HAS_BLT(dev) &&
+			    kick_ring(&dev_priv->ring[BCS]))
+				goto repeat;
 		}
 
 		i915_handle_error(dev, true);
@@ -1399,7 +1577,7 @@ void i915_hangcheck_elapsed(unsigned long data)
 		dev_priv->last_instdone1 = instdone1;
 	}
 
-out:
+repeat:
 	/* Reset timer case chip hangs without another request being added */
 	mod_timer(&dev_priv->hangcheck_timer,
 		  jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
@@ -1417,17 +1595,17 @@ static void ironlake_irq_preinstall(struct drm_device *dev) | |||
1417 | 1595 | ||
1418 | I915_WRITE(DEIMR, 0xffffffff); | 1596 | I915_WRITE(DEIMR, 0xffffffff); |
1419 | I915_WRITE(DEIER, 0x0); | 1597 | I915_WRITE(DEIER, 0x0); |
1420 | (void) I915_READ(DEIER); | 1598 | POSTING_READ(DEIER); |
1421 | 1599 | ||
1422 | /* and GT */ | 1600 | /* and GT */ |
1423 | I915_WRITE(GTIMR, 0xffffffff); | 1601 | I915_WRITE(GTIMR, 0xffffffff); |
1424 | I915_WRITE(GTIER, 0x0); | 1602 | I915_WRITE(GTIER, 0x0); |
1425 | (void) I915_READ(GTIER); | 1603 | POSTING_READ(GTIER); |
1426 | 1604 | ||
1427 | /* south display irq */ | 1605 | /* south display irq */ |
1428 | I915_WRITE(SDEIMR, 0xffffffff); | 1606 | I915_WRITE(SDEIMR, 0xffffffff); |
1429 | I915_WRITE(SDEIER, 0x0); | 1607 | I915_WRITE(SDEIER, 0x0); |
1430 | (void) I915_READ(SDEIER); | 1608 | POSTING_READ(SDEIER); |
1431 | } | 1609 | } |
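
This hunk, like the rest of the patch, replaces the open-coded (void) I915_READ(reg) idiom with POSTING_READ(). The macro is presumably just a named wrapper around the same posting-read trick, along the lines of:

/* flush a posted MMIO write by reading the same register straight back */
#define POSTING_READ(reg)       (void)I915_READ(reg)

Naming the idiom documents why the read result is discarded: the read forces the preceding write out of the chipset's write buffers before the code moves on.
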
1432 | 1610 | ||
1433 | static int ironlake_irq_postinstall(struct drm_device *dev) | 1611 | static int ironlake_irq_postinstall(struct drm_device *dev) |
@@ -1436,38 +1614,34 @@ static int ironlake_irq_postinstall(struct drm_device *dev) | |||
1436 | /* enable the kinds of interrupts that are always enabled */ | 1614 | /* enable the kinds of interrupts that are always enabled */ |
1437 | u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | | 1615 | u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | |
1438 | DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE; | 1616 | DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE; |
1439 | u32 render_mask = GT_PIPE_NOTIFY | GT_BSD_USER_INTERRUPT; | 1617 | u32 render_irqs; |
1440 | u32 hotplug_mask; | 1618 | u32 hotplug_mask; |
1441 | 1619 | ||
1442 | dev_priv->irq_mask_reg = ~display_mask; | 1620 | dev_priv->irq_mask = ~display_mask; |
1443 | dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK; | ||
1444 | 1621 | ||
1445 | /* should always be able to generate irqs */ | 1622 | /* should always be able to generate irqs */ |
1446 | I915_WRITE(DEIIR, I915_READ(DEIIR)); | 1623 | I915_WRITE(DEIIR, I915_READ(DEIIR)); |
1447 | I915_WRITE(DEIMR, dev_priv->irq_mask_reg); | 1624 | I915_WRITE(DEIMR, dev_priv->irq_mask); |
1448 | I915_WRITE(DEIER, dev_priv->de_irq_enable_reg); | 1625 | I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK); |
1449 | (void) I915_READ(DEIER); | 1626 | POSTING_READ(DEIER); |
1450 | |||
1451 | if (IS_GEN6(dev)) { | ||
1452 | render_mask = | ||
1453 | GT_PIPE_NOTIFY | | ||
1454 | GT_GEN6_BSD_USER_INTERRUPT | | ||
1455 | GT_BLT_USER_INTERRUPT; | ||
1456 | } | ||
1457 | 1627 | ||
1458 | dev_priv->gt_irq_mask_reg = ~render_mask; | 1628 | dev_priv->gt_irq_mask = ~0; |
1459 | dev_priv->gt_irq_enable_reg = render_mask; | ||
1460 | 1629 | ||
1461 | I915_WRITE(GTIIR, I915_READ(GTIIR)); | 1630 | I915_WRITE(GTIIR, I915_READ(GTIIR)); |
1462 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg); | 1631 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask); |
1463 | if (IS_GEN6(dev)) { | ||
1464 | I915_WRITE(GEN6_RENDER_IMR, ~GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT); | ||
1465 | I915_WRITE(GEN6_BSD_IMR, ~GEN6_BSD_IMR_USER_INTERRUPT); | ||
1466 | I915_WRITE(GEN6_BLITTER_IMR, ~GEN6_BLITTER_USER_INTERRUPT); | ||
1467 | } | ||
1468 | 1632 | ||
1469 | I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg); | 1633 | if (IS_GEN6(dev)) |
1470 | (void) I915_READ(GTIER); | 1634 | render_irqs = |
1635 | GT_USER_INTERRUPT | | ||
1636 | GT_GEN6_BSD_USER_INTERRUPT | | ||
1637 | GT_BLT_USER_INTERRUPT; | ||
1638 | else | ||
1639 | render_irqs = | ||
1640 | GT_USER_INTERRUPT | | ||
1641 | GT_PIPE_NOTIFY | | ||
1642 | GT_BSD_USER_INTERRUPT; | ||
1643 | I915_WRITE(GTIER, render_irqs); | ||
1644 | POSTING_READ(GTIER); | ||
1471 | 1645 | ||
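
GTIMR now starts out fully masked (gt_irq_mask = ~0) even though GTIER enables the per-ring user interrupts: delivery is gated at runtime, unmasked only while a ring has a waiter. A sketch of the unmask step such a scheme implies (the helper name is assumed; the patch presumably routes this through each ring's irq_get hook rather than a standalone function):

static void ironlake_enable_gt_irq(drm_i915_private_t *dev_priv, u32 mask)
{
        dev_priv->gt_irq_mask &= ~mask;         /* 0 bits are unmasked */
        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
        POSTING_READ(GTIMR);
}
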
1472 | if (HAS_PCH_CPT(dev)) { | 1646 | if (HAS_PCH_CPT(dev)) { |
1473 | hotplug_mask = SDE_CRT_HOTPLUG_CPT | SDE_PORTB_HOTPLUG_CPT | | 1647 | hotplug_mask = SDE_CRT_HOTPLUG_CPT | SDE_PORTB_HOTPLUG_CPT | |
@@ -1475,15 +1649,17 @@ static int ironlake_irq_postinstall(struct drm_device *dev) | |||
1475 | } else { | 1649 | } else { |
1476 | hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | | 1650 | hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | |
1477 | SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; | 1651 | SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; |
1652 | hotplug_mask |= SDE_AUX_MASK | SDE_FDI_MASK | SDE_TRANS_MASK; | ||
1653 | I915_WRITE(FDI_RXA_IMR, 0); | ||
1654 | I915_WRITE(FDI_RXB_IMR, 0); | ||
1478 | } | 1655 | } |
1479 | 1656 | ||
1480 | dev_priv->pch_irq_mask_reg = ~hotplug_mask; | 1657 | dev_priv->pch_irq_mask = ~hotplug_mask; |
1481 | dev_priv->pch_irq_enable_reg = hotplug_mask; | ||
1482 | 1658 | ||
1483 | I915_WRITE(SDEIIR, I915_READ(SDEIIR)); | 1659 | I915_WRITE(SDEIIR, I915_READ(SDEIIR)); |
1484 | I915_WRITE(SDEIMR, dev_priv->pch_irq_mask_reg); | 1660 | I915_WRITE(SDEIMR, dev_priv->pch_irq_mask); |
1485 | I915_WRITE(SDEIER, dev_priv->pch_irq_enable_reg); | 1661 | I915_WRITE(SDEIER, hotplug_mask); |
1486 | (void) I915_READ(SDEIER); | 1662 | POSTING_READ(SDEIER); |
1487 | 1663 | ||
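
On non-CPT south bridges the enable mask now also covers AUX, FDI and transcoder events, and the FDI RX IMRs are opened up. For orientation, a simplified sketch of how the SDE bits unmasked here typically surface in the interrupt handler (the work-queue wiring is assumed, not shown in this diff):

/* in the ironlake irq handler: ack south display events, kick hotplug work */
u32 pch_iir = I915_READ(SDEIIR);

if (pch_iir & hotplug_mask)
        queue_work(dev_priv->wq, &dev_priv->hotplug_work);
I915_WRITE(SDEIIR, pch_iir);    /* writing the bits back clears them */
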
1488 | if (IS_IRONLAKE_M(dev)) { | 1664 | if (IS_IRONLAKE_M(dev)) { |
1489 | /* Clear & enable PCU event interrupts */ | 1665 | /* Clear & enable PCU event interrupts */ |
@@ -1519,7 +1695,7 @@ void i915_driver_irq_preinstall(struct drm_device * dev) | |||
1519 | I915_WRITE(PIPEBSTAT, 0); | 1695 | I915_WRITE(PIPEBSTAT, 0); |
1520 | I915_WRITE(IMR, 0xffffffff); | 1696 | I915_WRITE(IMR, 0xffffffff); |
1521 | I915_WRITE(IER, 0x0); | 1697 | I915_WRITE(IER, 0x0); |
1522 | (void) I915_READ(IER); | 1698 | POSTING_READ(IER); |
1523 | } | 1699 | } |
1524 | 1700 | ||
1525 | /* | 1701 | /* |
@@ -1532,11 +1708,11 @@ int i915_driver_irq_postinstall(struct drm_device *dev) | |||
1532 | u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR; | 1708 | u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR; |
1533 | u32 error_mask; | 1709 | u32 error_mask; |
1534 | 1710 | ||
1535 | DRM_INIT_WAITQUEUE(&dev_priv->render_ring.irq_queue); | 1711 | DRM_INIT_WAITQUEUE(&dev_priv->ring[RCS].irq_queue); |
1536 | if (HAS_BSD(dev)) | 1712 | if (HAS_BSD(dev)) |
1537 | DRM_INIT_WAITQUEUE(&dev_priv->bsd_ring.irq_queue); | 1713 | DRM_INIT_WAITQUEUE(&dev_priv->ring[VCS].irq_queue); |
1538 | if (HAS_BLT(dev)) | 1714 | if (HAS_BLT(dev)) |
1539 | DRM_INIT_WAITQUEUE(&dev_priv->blt_ring.irq_queue); | 1715 | DRM_INIT_WAITQUEUE(&dev_priv->ring[BCS].irq_queue); |
1540 | 1716 | ||
1541 | dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; | 1717 | dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; |
1542 | 1718 | ||
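
The per-ring irq_queue initialized above is the sleep side of the wake_up_all() calls in the hangcheck and interrupt paths. A waiter blocks on it roughly like this (simplified; the real wait loop also copes with GPU resets and pending signals):

/* sleep until the ring's hardware seqno passes the one we submitted */
wait_event_interruptible(ring->irq_queue,
                         i915_seqno_passed(ring->get_seqno(ring), seqno));
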
@@ -1544,7 +1720,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev) | |||
1544 | return ironlake_irq_postinstall(dev); | 1720 | return ironlake_irq_postinstall(dev); |
1545 | 1721 | ||
1546 | /* Unmask the interrupts that we always want on. */ | 1722 | /* Unmask the interrupts that we always want on. */ |
1547 | dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX; | 1723 | dev_priv->irq_mask = ~I915_INTERRUPT_ENABLE_FIX; |
1548 | 1724 | ||
1549 | dev_priv->pipestat[0] = 0; | 1725 | dev_priv->pipestat[0] = 0; |
1550 | dev_priv->pipestat[1] = 0; | 1726 | dev_priv->pipestat[1] = 0; |
@@ -1553,7 +1729,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev) | |||
1553 | /* Enable in IER... */ | 1729 | /* Enable in IER... */ |
1554 | enable_mask |= I915_DISPLAY_PORT_INTERRUPT; | 1730 | enable_mask |= I915_DISPLAY_PORT_INTERRUPT; |
1555 | /* and unmask in IMR */ | 1731 | /* and unmask in IMR */ |
1556 | dev_priv->irq_mask_reg &= ~I915_DISPLAY_PORT_INTERRUPT; | 1732 | dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; |
1557 | } | 1733 | } |
1558 | 1734 | ||
1559 | /* | 1735 | /* |
@@ -1571,9 +1747,9 @@ int i915_driver_irq_postinstall(struct drm_device *dev) | |||
1571 | } | 1747 | } |
1572 | I915_WRITE(EMR, error_mask); | 1748 | I915_WRITE(EMR, error_mask); |
1573 | 1749 | ||
1574 | I915_WRITE(IMR, dev_priv->irq_mask_reg); | 1750 | I915_WRITE(IMR, dev_priv->irq_mask); |
1575 | I915_WRITE(IER, enable_mask); | 1751 | I915_WRITE(IER, enable_mask); |
1576 | (void) I915_READ(IER); | 1752 | POSTING_READ(IER); |
1577 | 1753 | ||
1578 | if (I915_HAS_HOTPLUG(dev)) { | 1754 | if (I915_HAS_HOTPLUG(dev)) { |
1579 | u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); | 1755 | u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); |