Diffstat (limited to 'drivers/gpu/drm/i915/i915_irq.c')
-rw-r--r-- | drivers/gpu/drm/i915/i915_irq.c | 1504
1 file changed, 978 insertions(+), 526 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 744225ebb4b2..3b03f85ea627 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -64,87 +64,37 @@
64 | #define DRM_I915_VBLANK_PIPE_ALL (DRM_I915_VBLANK_PIPE_A | \ | 64 | #define DRM_I915_VBLANK_PIPE_ALL (DRM_I915_VBLANK_PIPE_A | \ |
65 | DRM_I915_VBLANK_PIPE_B) | 65 | DRM_I915_VBLANK_PIPE_B) |
66 | 66 | ||
67 | void | ||
68 | ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask) | ||
69 | { | ||
70 | if ((dev_priv->gt_irq_mask_reg & mask) != 0) { | ||
71 | dev_priv->gt_irq_mask_reg &= ~mask; | ||
72 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg); | ||
73 | (void) I915_READ(GTIMR); | ||
74 | } | ||
75 | } | ||
76 | |||
77 | void | ||
78 | ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask) | ||
79 | { | ||
80 | if ((dev_priv->gt_irq_mask_reg & mask) != mask) { | ||
81 | dev_priv->gt_irq_mask_reg |= mask; | ||
82 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg); | ||
83 | (void) I915_READ(GTIMR); | ||
84 | } | ||
85 | } | ||
86 | |||
87 | /* For display hotplug interrupt */ | 67 | /* For display hotplug interrupt */ |
88 | void | 68 | static void |
89 | ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask) | 69 | ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask) |
90 | { | 70 | { |
91 | if ((dev_priv->irq_mask_reg & mask) != 0) { | 71 | if ((dev_priv->irq_mask & mask) != 0) { |
92 | dev_priv->irq_mask_reg &= ~mask; | 72 | dev_priv->irq_mask &= ~mask; |
93 | I915_WRITE(DEIMR, dev_priv->irq_mask_reg); | 73 | I915_WRITE(DEIMR, dev_priv->irq_mask); |
94 | (void) I915_READ(DEIMR); | 74 | POSTING_READ(DEIMR); |
95 | } | 75 | } |
96 | } | 76 | } |
97 | 77 | ||
98 | static inline void | 78 | static inline void |
99 | ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask) | 79 | ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask) |
100 | { | 80 | { |
101 | if ((dev_priv->irq_mask_reg & mask) != mask) { | 81 | if ((dev_priv->irq_mask & mask) != mask) { |
102 | dev_priv->irq_mask_reg |= mask; | 82 | dev_priv->irq_mask |= mask; |
103 | I915_WRITE(DEIMR, dev_priv->irq_mask_reg); | 83 | I915_WRITE(DEIMR, dev_priv->irq_mask); |
104 | (void) I915_READ(DEIMR); | 84 | POSTING_READ(DEIMR); |
105 | } | ||
106 | } | ||
107 | |||
108 | void | ||
109 | i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask) | ||
110 | { | ||
111 | if ((dev_priv->irq_mask_reg & mask) != 0) { | ||
112 | dev_priv->irq_mask_reg &= ~mask; | ||
113 | I915_WRITE(IMR, dev_priv->irq_mask_reg); | ||
114 | (void) I915_READ(IMR); | ||
115 | } | 85 | } |
116 | } | 86 | } |
117 | 87 | ||
118 | void | 88 | void |
119 | i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask) | ||
120 | { | ||
121 | if ((dev_priv->irq_mask_reg & mask) != mask) { | ||
122 | dev_priv->irq_mask_reg |= mask; | ||
123 | I915_WRITE(IMR, dev_priv->irq_mask_reg); | ||
124 | (void) I915_READ(IMR); | ||
125 | } | ||
126 | } | ||
127 | |||
128 | static inline u32 | ||
129 | i915_pipestat(int pipe) | ||
130 | { | ||
131 | if (pipe == 0) | ||
132 | return PIPEASTAT; | ||
133 | if (pipe == 1) | ||
134 | return PIPEBSTAT; | ||
135 | BUG(); | ||
136 | } | ||
137 | |||
138 | void | ||
139 | i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) | 89 | i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) |
140 | { | 90 | { |
141 | if ((dev_priv->pipestat[pipe] & mask) != mask) { | 91 | if ((dev_priv->pipestat[pipe] & mask) != mask) { |
142 | u32 reg = i915_pipestat(pipe); | 92 | u32 reg = PIPESTAT(pipe); |
143 | 93 | ||
144 | dev_priv->pipestat[pipe] |= mask; | 94 | dev_priv->pipestat[pipe] |= mask; |
145 | /* Enable the interrupt, clear any pending status */ | 95 | /* Enable the interrupt, clear any pending status */ |
146 | I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16)); | 96 | I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16)); |
147 | (void) I915_READ(reg); | 97 | POSTING_READ(reg); |
148 | } | 98 | } |
149 | } | 99 | } |
150 | 100 | ||
@@ -152,30 +102,35 @@ void
152 | i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) | 102 | i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) |
153 | { | 103 | { |
154 | if ((dev_priv->pipestat[pipe] & mask) != 0) { | 104 | if ((dev_priv->pipestat[pipe] & mask) != 0) { |
155 | u32 reg = i915_pipestat(pipe); | 105 | u32 reg = PIPESTAT(pipe); |
156 | 106 | ||
157 | dev_priv->pipestat[pipe] &= ~mask; | 107 | dev_priv->pipestat[pipe] &= ~mask; |
158 | I915_WRITE(reg, dev_priv->pipestat[pipe]); | 108 | I915_WRITE(reg, dev_priv->pipestat[pipe]); |
159 | (void) I915_READ(reg); | 109 | POSTING_READ(reg); |
160 | } | 110 | } |
161 | } | 111 | } |
162 | 112 | ||
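A pattern worth calling out in the mask helpers above: the driver keeps a software copy of each interrupt-mask register (irq_mask, pipestat[]), skips the MMIO write entirely when the cached value already matches, and follows every write with a read-back of the same register. The read flushes the posted write out of the chipset's write buffer, which is what the new POSTING_READ() macro spells out where the old code used `(void) I915_READ(...)`. A minimal userspace model of the idea, with the register faked by a plain variable and all names invented for the sketch:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t hw_imr;            /* stands in for DEIMR */
    static unsigned hw_writes;         /* how many "MMIO" writes we issued */
    static uint32_t cached_imr = ~0u;  /* software copy: everything masked */

    static void write_imr(uint32_t v) { hw_imr = v; hw_writes++; }
    static uint32_t read_imr(void)    { return hw_imr; }  /* posting read */

    /* Unmask `mask`, touching the hardware only if something changes. */
    static void enable_irq(uint32_t mask)
    {
        if ((cached_imr & mask) != 0) {
            cached_imr &= ~mask;
            write_imr(cached_imr);
            (void)read_imr();          /* flush the posted write */
        }
    }

    int main(void)
    {
        enable_irq(1u << 5);
        enable_irq(1u << 5);           /* no-op: cached copy already clear */
        printf("imr=%08x writes=%u\n", hw_imr, hw_writes);  /* writes=1 */
        return 0;
    }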
163 | /** | 113 | /** |
164 | * intel_enable_asle - enable ASLE interrupt for OpRegion | 114 | * intel_enable_asle - enable ASLE interrupt for OpRegion |
165 | */ | 115 | */ |
166 | void intel_enable_asle (struct drm_device *dev) | 116 | void intel_enable_asle(struct drm_device *dev) |
167 | { | 117 | { |
168 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 118 | drm_i915_private_t *dev_priv = dev->dev_private; |
119 | unsigned long irqflags; | ||
120 | |||
121 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | ||
169 | 122 | ||
170 | if (HAS_PCH_SPLIT(dev)) | 123 | if (HAS_PCH_SPLIT(dev)) |
171 | ironlake_enable_display_irq(dev_priv, DE_GSE); | 124 | ironlake_enable_display_irq(dev_priv, DE_GSE); |
172 | else { | 125 | else { |
173 | i915_enable_pipestat(dev_priv, 1, | 126 | i915_enable_pipestat(dev_priv, 1, |
174 | PIPE_LEGACY_BLC_EVENT_ENABLE); | 127 | PIPE_LEGACY_BLC_EVENT_ENABLE); |
175 | if (IS_I965G(dev)) | 128 | if (INTEL_INFO(dev)->gen >= 4) |
176 | i915_enable_pipestat(dev_priv, 0, | 129 | i915_enable_pipestat(dev_priv, 0, |
177 | PIPE_LEGACY_BLC_EVENT_ENABLE); | 130 | PIPE_LEGACY_BLC_EVENT_ENABLE); |
178 | } | 131 | } |
132 | |||
133 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | ||
179 | } | 134 | } |
180 | 135 | ||
181 | /** | 136 | /** |
@@ -191,66 +146,155 @@ static int
191 | i915_pipe_enabled(struct drm_device *dev, int pipe) | 146 | i915_pipe_enabled(struct drm_device *dev, int pipe) |
192 | { | 147 | { |
193 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 148 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
194 | unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF; | 149 | return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE; |
195 | |||
196 | if (I915_READ(pipeconf) & PIPEACONF_ENABLE) | ||
197 | return 1; | ||
198 | |||
199 | return 0; | ||
200 | } | 150 | } |
201 | 151 | ||
202 | /* Called from drm generic code, passed a 'crtc', which | 152 | /* Called from drm generic code, passed a 'crtc', which |
203 | * we use as a pipe index | 153 | * we use as a pipe index |
204 | */ | 154 | */ |
205 | u32 i915_get_vblank_counter(struct drm_device *dev, int pipe) | 155 | static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe) |
206 | { | 156 | { |
207 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 157 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
208 | unsigned long high_frame; | 158 | unsigned long high_frame; |
209 | unsigned long low_frame; | 159 | unsigned long low_frame; |
210 | u32 high1, high2, low, count; | 160 | u32 high1, high2, low; |
211 | |||
212 | high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH; | ||
213 | low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL; | ||
214 | 161 | ||
215 | if (!i915_pipe_enabled(dev, pipe)) { | 162 | if (!i915_pipe_enabled(dev, pipe)) { |
216 | DRM_DEBUG_DRIVER("trying to get vblank count for disabled " | 163 | DRM_DEBUG_DRIVER("trying to get vblank count for disabled " |
217 | "pipe %d\n", pipe); | 164 | "pipe %c\n", pipe_name(pipe)); |
218 | return 0; | 165 | return 0; |
219 | } | 166 | } |
220 | 167 | ||
168 | high_frame = PIPEFRAME(pipe); | ||
169 | low_frame = PIPEFRAMEPIXEL(pipe); | ||
170 | |||
221 | /* | 171 | /* |
222 | * High & low register fields aren't synchronized, so make sure | 172 | * High & low register fields aren't synchronized, so make sure |
223 | * we get a low value that's stable across two reads of the high | 173 | * we get a low value that's stable across two reads of the high |
224 | * register. | 174 | * register. |
225 | */ | 175 | */ |
226 | do { | 176 | do { |
227 | high1 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >> | 177 | high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; |
228 | PIPE_FRAME_HIGH_SHIFT); | 178 | low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK; |
229 | low = ((I915_READ(low_frame) & PIPE_FRAME_LOW_MASK) >> | 179 | high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; |
230 | PIPE_FRAME_LOW_SHIFT); | ||
231 | high2 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >> | ||
232 | PIPE_FRAME_HIGH_SHIFT); | ||
233 | } while (high1 != high2); | 180 | } while (high1 != high2); |
234 | 181 | ||
235 | count = (high1 << 8) | low; | 182 | high1 >>= PIPE_FRAME_HIGH_SHIFT; |
236 | 183 | low >>= PIPE_FRAME_LOW_SHIFT; | |
237 | return count; | 184 | return (high1 << 8) | low; |
238 | } | 185 | } |
239 | 186 | ||
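i915_get_vblank_counter() deals with a 24-bit frame counter whose high and low halves live in two registers that are not latched together: if the low half rolls over between the two reads, naively combining them gives a value off by 256 frames. The fix is the classic read-high / read-low / read-high loop, retrying until the high half is stable around the low read. A self-contained model of that loop; the "hardware" here is a simulated counter that keeps ticking between reads, and all names are made up for the sketch:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t hw_frames = 0x12ff;  /* about to carry into the high half */

    /* Split 24-bit counter: the halves are read independently, and the
     * counter advances between reads. */
    static uint32_t read_high(void) { uint32_t v = hw_frames >> 8;   hw_frames++; return v; }
    static uint32_t read_low(void)  { uint32_t v = hw_frames & 0xff; hw_frames++; return v; }

    static uint32_t get_frame_counter(void)
    {
        uint32_t high1, high2, low;

        /* Retry until the high half is stable across the low read. */
        do {
            high1 = read_high();
            low   = read_low();
            high2 = read_high();
        } while (high1 != high2);

        return (high1 << 8) | low;
    }

    int main(void)
    {
        /* The first attempt straddles the 0x12ff -> 0x1300 carry and is
         * discarded; the second attempt returns a consistent value. */
        printf("frame = 0x%06x\n", (unsigned)get_frame_counter());
        return 0;
    }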
240 | u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe) | 187 | static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe) |
241 | { | 188 | { |
242 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 189 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
243 | int reg = pipe ? PIPEB_FRMCOUNT_GM45 : PIPEA_FRMCOUNT_GM45; | 190 | int reg = PIPE_FRMCOUNT_GM45(pipe); |
244 | 191 | ||
245 | if (!i915_pipe_enabled(dev, pipe)) { | 192 | if (!i915_pipe_enabled(dev, pipe)) { |
246 | DRM_DEBUG_DRIVER("trying to get vblank count for disabled " | 193 | DRM_DEBUG_DRIVER("trying to get vblank count for disabled " |
247 | "pipe %d\n", pipe); | 194 | "pipe %c\n", pipe_name(pipe)); |
248 | return 0; | 195 | return 0; |
249 | } | 196 | } |
250 | 197 | ||
251 | return I915_READ(reg); | 198 | return I915_READ(reg); |
252 | } | 199 | } |
253 | 200 | ||
201 | static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, | ||
202 | int *vpos, int *hpos) | ||
203 | { | ||
204 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | ||
205 | u32 vbl = 0, position = 0; | ||
206 | int vbl_start, vbl_end, htotal, vtotal; | ||
207 | bool in_vbl = true; | ||
208 | int ret = 0; | ||
209 | |||
210 | if (!i915_pipe_enabled(dev, pipe)) { | ||
211 | DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled " | ||
212 | "pipe %c\n", pipe_name(pipe)); | ||
213 | return 0; | ||
214 | } | ||
215 | |||
216 | /* Get vtotal. */ | ||
217 | vtotal = 1 + ((I915_READ(VTOTAL(pipe)) >> 16) & 0x1fff); | ||
218 | |||
219 | if (INTEL_INFO(dev)->gen >= 4) { | ||
220 | /* No obvious pixelcount register. Only query vertical | ||
221 | * scanout position from Display scan line register. | ||
222 | */ | ||
223 | position = I915_READ(PIPEDSL(pipe)); | ||
224 | |||
225 | /* Decode into vertical scanout position. Don't have | ||
226 | * horizontal scanout position. | ||
227 | */ | ||
228 | *vpos = position & 0x1fff; | ||
229 | *hpos = 0; | ||
230 | } else { | ||
231 | /* Have access to pixelcount since start of frame. | ||
232 | * We can split this into vertical and horizontal | ||
233 | * scanout position. | ||
234 | */ | ||
235 | position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; | ||
236 | |||
237 | htotal = 1 + ((I915_READ(HTOTAL(pipe)) >> 16) & 0x1fff); | ||
238 | *vpos = position / htotal; | ||
239 | *hpos = position - (*vpos * htotal); | ||
240 | } | ||
241 | |||
242 | /* Query vblank area. */ | ||
243 | vbl = I915_READ(VBLANK(pipe)); | ||
244 | |||
245 | /* Test position against vblank region. */ | ||
246 | vbl_start = vbl & 0x1fff; | ||
247 | vbl_end = (vbl >> 16) & 0x1fff; | ||
248 | |||
249 | if ((*vpos < vbl_start) || (*vpos > vbl_end)) | ||
250 | in_vbl = false; | ||
251 | |||
252 | /* Inside "upper part" of vblank area? Apply corrective offset: */ | ||
253 | if (in_vbl && (*vpos >= vbl_start)) | ||
254 | *vpos = *vpos - vtotal; | ||
255 | |||
256 | /* Readouts valid? */ | ||
257 | if (vbl > 0) | ||
258 | ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE; | ||
259 | |||
260 | /* In vblank? */ | ||
261 | if (in_vbl) | ||
262 | ret |= DRM_SCANOUTPOS_INVBL; | ||
263 | |||
264 | return ret; | ||
265 | } | ||
266 | |||
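On pre-gen4 hardware, the new i915_get_crtc_scanoutpos() above only has a running pixel count since the start of the frame, so it derives the vertical and horizontal positions with a single division by the horizontal total, and then tests the vertical position against the vblank start/end lines. A standalone model of that arithmetic, with the display timings invented for the example:

    #include <stdbool.h>
    #include <stdio.h>

    struct scanpos { int vpos, hpos; bool in_vbl; };

    /* Split a pixel count into (vpos, hpos) and classify it against the
     * vblank window [vbl_start, vbl_end], as the pre-gen4 path does. */
    static struct scanpos decode(int pixelcount, int htotal,
                                 int vbl_start, int vbl_end)
    {
        struct scanpos p;

        p.vpos = pixelcount / htotal;
        p.hpos = pixelcount - p.vpos * htotal;  /* i.e. pixelcount % htotal */
        p.in_vbl = p.vpos >= vbl_start && p.vpos <= vbl_end;
        return p;
    }

    int main(void)
    {
        /* 1080p-ish timing: htotal 2200, vblank on lines 1080..1124 */
        struct scanpos p = decode(2200 * 1083 + 100, 2200, 1080, 1124);

        printf("vpos=%d hpos=%d in_vbl=%d\n", p.vpos, p.hpos, p.in_vbl);
        return 0;
    }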
267 | static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe, | ||
268 | int *max_error, | ||
269 | struct timeval *vblank_time, | ||
270 | unsigned flags) | ||
271 | { | ||
272 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
273 | struct drm_crtc *crtc; | ||
274 | |||
275 | if (pipe < 0 || pipe >= dev_priv->num_pipe) { | ||
276 | DRM_ERROR("Invalid crtc %d\n", pipe); | ||
277 | return -EINVAL; | ||
278 | } | ||
279 | |||
280 | /* Get drm_crtc to timestamp: */ | ||
281 | crtc = intel_get_crtc_for_pipe(dev, pipe); | ||
282 | if (crtc == NULL) { | ||
283 | DRM_ERROR("Invalid crtc %d\n", pipe); | ||
284 | return -EINVAL; | ||
285 | } | ||
286 | |||
287 | if (!crtc->enabled) { | ||
288 | DRM_DEBUG_KMS("crtc %d is disabled\n", pipe); | ||
289 | return -EBUSY; | ||
290 | } | ||
291 | |||
292 | /* Helper routine in DRM core does all the work: */ | ||
293 | return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error, | ||
294 | vblank_time, flags, | ||
295 | crtc); | ||
296 | } | ||
297 | |||
254 | /* | 298 | /* |
255 | * Handle hotplug events outside the interrupt handler proper. | 299 | * Handle hotplug events outside the interrupt handler proper. |
256 | */ | 300 | */ |
@@ -260,16 +304,14 @@ static void i915_hotplug_work_func(struct work_struct *work)
260 | hotplug_work); | 304 | hotplug_work); |
261 | struct drm_device *dev = dev_priv->dev; | 305 | struct drm_device *dev = dev_priv->dev; |
262 | struct drm_mode_config *mode_config = &dev->mode_config; | 306 | struct drm_mode_config *mode_config = &dev->mode_config; |
263 | struct drm_encoder *encoder; | 307 | struct intel_encoder *encoder; |
264 | 308 | ||
265 | if (mode_config->num_encoder) { | 309 | DRM_DEBUG_KMS("running encoder hotplug functions\n"); |
266 | list_for_each_entry(encoder, &mode_config->encoder_list, head) { | 310 | |
267 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | 311 | list_for_each_entry(encoder, &mode_config->encoder_list, base.head) |
268 | 312 | if (encoder->hot_plug) | |
269 | if (intel_encoder->hot_plug) | 313 | encoder->hot_plug(encoder); |
270 | (*intel_encoder->hot_plug) (intel_encoder); | 314 | |
271 | } | ||
272 | } | ||
273 | /* Just fire off a uevent and let userspace tell us what to do */ | 315 | /* Just fire off a uevent and let userspace tell us what to do */ |
274 | drm_helper_hpd_irq_event(dev); | 316 | drm_helper_hpd_irq_event(dev); |
275 | } | 317 | } |
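i915_hotplug_work_func() exists because hotplug processing (re-detection, possibly sleeping I2C traffic) cannot run in interrupt context; the interrupt handler only queues this work item. A sketch of that top-half/bottom-half split in plain C, with an invented encoder list and a flag standing in for queue_work():

    #include <stdio.h>

    /* The interrupt handler only queues work; the work function walks the
     * encoder list at leisure and calls each encoder's optional hook. */
    struct encoder {
        const char *name;
        void (*hot_plug)(struct encoder *);  /* may be NULL */
        struct encoder *next;
    };

    static struct encoder *encoder_list;
    static int hotplug_pending;              /* stands in for queue_work() */

    static void irq_handler(void) { hotplug_pending = 1; }

    static void hotplug_work_func(void)
    {
        struct encoder *enc;

        for (enc = encoder_list; enc; enc = enc->next)
            if (enc->hot_plug)
                enc->hot_plug(enc);
        /* ...then notify userspace, as drm_helper_hpd_irq_event() does */
    }

    static void redetect(struct encoder *e) { printf("redetect %s\n", e->name); }

    int main(void)
    {
        struct encoder hdmi = { "HDMI-A", redetect, NULL };
        struct encoder lvds = { "LVDS", NULL, &hdmi };  /* no hook: skipped */
        encoder_list = &lvds;

        irq_handler();
        if (hotplug_pending)
            hotplug_work_func();
        return 0;
    }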
@@ -305,24 +347,142 @@ static void i915_handle_rps_change(struct drm_device *dev)
305 | return; | 347 | return; |
306 | } | 348 | } |
307 | 349 | ||
308 | irqreturn_t ironlake_irq_handler(struct drm_device *dev) | 350 | static void notify_ring(struct drm_device *dev, |
351 | struct intel_ring_buffer *ring) | ||
352 | { | ||
353 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
354 | u32 seqno; | ||
355 | |||
356 | if (ring->obj == NULL) | ||
357 | return; | ||
358 | |||
359 | seqno = ring->get_seqno(ring); | ||
360 | trace_i915_gem_request_complete(ring, seqno); | ||
361 | |||
362 | ring->irq_seqno = seqno; | ||
363 | wake_up_all(&ring->irq_queue); | ||
364 | |||
365 | dev_priv->hangcheck_count = 0; | ||
366 | mod_timer(&dev_priv->hangcheck_timer, | ||
367 | jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); | ||
368 | } | ||
369 | |||
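notify_ring() does three things on every user interrupt: publish the ring's latest completed seqno, wake everything sleeping on the ring's wait queue, and push the hangcheck watchdog out by another period, so a ring that keeps retiring work is never declared hung. A toy model of that bookkeeping, with wake_up_all() elided and all names and values invented:

    #include <stdint.h>
    #include <stdio.h>

    struct ring { uint32_t irq_seqno; };   /* last seqno seen by the IRQ path */

    static unsigned long jiffies;          /* fake clock */
    static unsigned long hangcheck_deadline;
    static int hangcheck_count;

    static uint32_t hw_seqno;
    static uint32_t get_seqno(void) { return hw_seqno; }

    static void notify_ring(struct ring *ring)
    {
        ring->irq_seqno = get_seqno();
        /* wake_up_all(&ring->irq_queue) would go here */

        hangcheck_count = 0;               /* progress: reset the watchdog */
        hangcheck_deadline = jiffies + 1500;
    }

    int main(void)
    {
        struct ring rcs = { 0 };

        jiffies = 1000;
        hw_seqno = 42;
        notify_ring(&rcs);
        printf("seqno=%u count=%d deadline=%lu\n",
               rcs.irq_seqno, hangcheck_count, hangcheck_deadline);
        return 0;
    }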
370 | static void gen6_pm_rps_work(struct work_struct *work) | ||
371 | { | ||
372 | drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, | ||
373 | rps_work); | ||
374 | u8 new_delay = dev_priv->cur_delay; | ||
375 | u32 pm_iir, pm_imr; | ||
376 | |||
377 | spin_lock_irq(&dev_priv->rps_lock); | ||
378 | pm_iir = dev_priv->pm_iir; | ||
379 | dev_priv->pm_iir = 0; | ||
380 | pm_imr = I915_READ(GEN6_PMIMR); | ||
381 | spin_unlock_irq(&dev_priv->rps_lock); | ||
382 | |||
383 | if (!pm_iir) | ||
384 | return; | ||
385 | |||
386 | mutex_lock(&dev_priv->dev->struct_mutex); | ||
387 | if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { | ||
388 | if (dev_priv->cur_delay != dev_priv->max_delay) | ||
389 | new_delay = dev_priv->cur_delay + 1; | ||
390 | if (new_delay > dev_priv->max_delay) | ||
391 | new_delay = dev_priv->max_delay; | ||
392 | } else if (pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT)) { | ||
393 | gen6_gt_force_wake_get(dev_priv); | ||
394 | if (dev_priv->cur_delay != dev_priv->min_delay) | ||
395 | new_delay = dev_priv->cur_delay - 1; | ||
396 | if (new_delay < dev_priv->min_delay) { | ||
397 | new_delay = dev_priv->min_delay; | ||
398 | I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, | ||
399 | I915_READ(GEN6_RP_INTERRUPT_LIMITS) | | ||
400 | ((new_delay << 16) & 0x3f0000)); | ||
401 | } else { | ||
402 | /* Make sure we continue to get down interrupts | ||
403 | * until we hit the minimum frequency */ | ||
404 | I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, | ||
405 | I915_READ(GEN6_RP_INTERRUPT_LIMITS) & ~0x3f0000); | ||
406 | } | ||
407 | gen6_gt_force_wake_put(dev_priv); | ||
408 | } | ||
409 | |||
410 | gen6_set_rps(dev_priv->dev, new_delay); | ||
411 | dev_priv->cur_delay = new_delay; | ||
412 | |||
413 | /* | ||
414 | * rps_lock not held here because clearing is non-destructive. There is | ||
415 | * an *extremely* unlikely race with gen6_rps_enable() that is prevented | ||
416 | * by holding struct_mutex for the duration of the write. | ||
417 | */ | ||
418 | I915_WRITE(GEN6_PMIMR, pm_imr & ~pm_iir); | ||
419 | mutex_unlock(&dev_priv->dev->struct_mutex); | ||
420 | } | ||
421 | |||
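gen6_pm_rps_work() nudges the GPU frequency one step per event and clamps to the [min, max] range; once it hits the floor it also rewrites GEN6_RP_INTERRUPT_LIMITS so "go down" events stop firing. The step-and-clamp decision, as a standalone sketch (the bit names and values here are invented stand-ins for the GEN6_PM_RP_* bits):

    #include <stdio.h>

    #define UP_EVENT   (1u << 0)
    #define DOWN_EVENT (1u << 1)

    /* One RPS work iteration: move the delay one step toward the event
     * and clamp to [min, max]. */
    static unsigned step_delay(unsigned cur, unsigned min, unsigned max,
                               unsigned events)
    {
        unsigned new_delay = cur;

        if (events & UP_EVENT) {
            if (cur != max)
                new_delay = cur + 1;
            if (new_delay > max)
                new_delay = max;
        } else if (events & DOWN_EVENT) {
            if (cur != min)
                new_delay = cur - 1;
            if (new_delay < min)
                new_delay = min;
            /* at the floor the real code also rewrites the interrupt
             * limits register so down events stop arriving */
        }
        return new_delay;
    }

    int main(void)
    {
        printf("%u\n", step_delay(10, 5, 15, UP_EVENT));   /* 11 */
        printf("%u\n", step_delay(5, 5, 15, DOWN_EVENT));  /* stays 5 */
        return 0;
    }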
422 | static void pch_irq_handler(struct drm_device *dev) | ||
423 | { | ||
424 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | ||
425 | u32 pch_iir; | ||
426 | int pipe; | ||
427 | |||
428 | pch_iir = I915_READ(SDEIIR); | ||
429 | |||
430 | if (pch_iir & SDE_AUDIO_POWER_MASK) | ||
431 | DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", | ||
432 | (pch_iir & SDE_AUDIO_POWER_MASK) >> | ||
433 | SDE_AUDIO_POWER_SHIFT); | ||
434 | |||
435 | if (pch_iir & SDE_GMBUS) | ||
436 | DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n"); | ||
437 | |||
438 | if (pch_iir & SDE_AUDIO_HDCP_MASK) | ||
439 | DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); | ||
440 | |||
441 | if (pch_iir & SDE_AUDIO_TRANS_MASK) | ||
442 | DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); | ||
443 | |||
444 | if (pch_iir & SDE_POISON) | ||
445 | DRM_ERROR("PCH poison interrupt\n"); | ||
446 | |||
447 | if (pch_iir & SDE_FDI_MASK) | ||
448 | for_each_pipe(pipe) | ||
449 | DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", | ||
450 | pipe_name(pipe), | ||
451 | I915_READ(FDI_RX_IIR(pipe))); | ||
452 | |||
453 | if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) | ||
454 | DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); | ||
455 | |||
456 | if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) | ||
457 | DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); | ||
458 | |||
459 | if (pch_iir & SDE_TRANSB_FIFO_UNDER) | ||
460 | DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n"); | ||
461 | if (pch_iir & SDE_TRANSA_FIFO_UNDER) | ||
462 | DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n"); | ||
463 | } | ||
464 | |||
465 | static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS) | ||
309 | { | 466 | { |
467 | struct drm_device *dev = (struct drm_device *) arg; | ||
310 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 468 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
311 | int ret = IRQ_NONE; | 469 | int ret = IRQ_NONE; |
312 | u32 de_iir, gt_iir, de_ier, pch_iir; | 470 | u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir; |
313 | struct drm_i915_master_private *master_priv; | 471 | struct drm_i915_master_private *master_priv; |
314 | struct intel_ring_buffer *render_ring = &dev_priv->render_ring; | 472 | |
473 | atomic_inc(&dev_priv->irq_received); | ||
315 | 474 | ||
316 | /* disable master interrupt before clearing iir */ | 475 | /* disable master interrupt before clearing iir */ |
317 | de_ier = I915_READ(DEIER); | 476 | de_ier = I915_READ(DEIER); |
318 | I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); | 477 | I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); |
319 | (void)I915_READ(DEIER); | 478 | POSTING_READ(DEIER); |
320 | 479 | ||
321 | de_iir = I915_READ(DEIIR); | 480 | de_iir = I915_READ(DEIIR); |
322 | gt_iir = I915_READ(GTIIR); | 481 | gt_iir = I915_READ(GTIIR); |
323 | pch_iir = I915_READ(SDEIIR); | 482 | pch_iir = I915_READ(SDEIIR); |
483 | pm_iir = I915_READ(GEN6_PMIIR); | ||
324 | 484 | ||
325 | if (de_iir == 0 && gt_iir == 0 && pch_iir == 0) | 485 | if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 && pm_iir == 0) |
326 | goto done; | 486 | goto done; |
327 | 487 | ||
328 | ret = IRQ_HANDLED; | 488 | ret = IRQ_HANDLED; |
@@ -334,29 +494,123 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
334 | READ_BREADCRUMB(dev_priv); | 494 | READ_BREADCRUMB(dev_priv); |
335 | } | 495 | } |
336 | 496 | ||
337 | if (gt_iir & GT_PIPE_NOTIFY) { | 497 | if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY)) |
338 | u32 seqno = render_ring->get_gem_seqno(dev, render_ring); | 498 | notify_ring(dev, &dev_priv->ring[RCS]); |
339 | render_ring->irq_gem_seqno = seqno; | 499 | if (gt_iir & GT_GEN6_BSD_USER_INTERRUPT) |
340 | trace_i915_gem_request_complete(dev, seqno); | 500 | notify_ring(dev, &dev_priv->ring[VCS]); |
341 | DRM_WAKEUP(&dev_priv->render_ring.irq_queue); | 501 | if (gt_iir & GT_BLT_USER_INTERRUPT) |
342 | dev_priv->hangcheck_count = 0; | 502 | notify_ring(dev, &dev_priv->ring[BCS]); |
343 | mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); | 503 | |
504 | if (de_iir & DE_GSE_IVB) | ||
505 | intel_opregion_gse_intr(dev); | ||
506 | |||
507 | if (de_iir & DE_PLANEA_FLIP_DONE_IVB) { | ||
508 | intel_prepare_page_flip(dev, 0); | ||
509 | intel_finish_page_flip_plane(dev, 0); | ||
510 | } | ||
511 | |||
512 | if (de_iir & DE_PLANEB_FLIP_DONE_IVB) { | ||
513 | intel_prepare_page_flip(dev, 1); | ||
514 | intel_finish_page_flip_plane(dev, 1); | ||
515 | } | ||
516 | |||
517 | if (de_iir & DE_PIPEA_VBLANK_IVB) | ||
518 | drm_handle_vblank(dev, 0); | ||
519 | |||
520 | if (de_iir & DE_PIPEB_VBLANK_IVB) | ||
521 | drm_handle_vblank(dev, 1); | ||
522 | |||
523 | /* check event from PCH */ | ||
524 | if (de_iir & DE_PCH_EVENT_IVB) { | ||
525 | if (pch_iir & SDE_HOTPLUG_MASK_CPT) | ||
526 | queue_work(dev_priv->wq, &dev_priv->hotplug_work); | ||
527 | pch_irq_handler(dev); | ||
344 | } | 528 | } |
345 | if (gt_iir & GT_BSD_USER_INTERRUPT) | ||
346 | DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue); | ||
347 | 529 | ||
530 | if (pm_iir & GEN6_PM_DEFERRED_EVENTS) { | ||
531 | unsigned long flags; | ||
532 | spin_lock_irqsave(&dev_priv->rps_lock, flags); | ||
533 | WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n"); | ||
534 | I915_WRITE(GEN6_PMIMR, pm_iir); | ||
535 | dev_priv->pm_iir |= pm_iir; | ||
536 | spin_unlock_irqrestore(&dev_priv->rps_lock, flags); | ||
537 | queue_work(dev_priv->wq, &dev_priv->rps_work); | ||
538 | } | ||
539 | |||
540 | /* should clear PCH hotplug event before clear CPU irq */ | ||
541 | I915_WRITE(SDEIIR, pch_iir); | ||
542 | I915_WRITE(GTIIR, gt_iir); | ||
543 | I915_WRITE(DEIIR, de_iir); | ||
544 | I915_WRITE(GEN6_PMIIR, pm_iir); | ||
545 | |||
546 | done: | ||
547 | I915_WRITE(DEIER, de_ier); | ||
548 | POSTING_READ(DEIER); | ||
549 | |||
550 | return ret; | ||
551 | } | ||
552 | |||
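Both ivybridge_irq_handler() above and ironlake_irq_handler() below follow the same five-step shape: gate the master enable bit in DEIER, snapshot every IIR, return IRQ_NONE if all snapshots are clear (the line may be shared), dispatch on the snapshot, then ack by writing the snapshot back (IIR bits are write-one-to-clear) and restore DEIER, with a posting read after each write that has to land before the next step. A compact single-threaded model of that shape, with fake registers standing in for the hardware:

    #include <stdint.h>
    #include <stdio.h>

    enum { IRQ_NONE, IRQ_HANDLED };

    #define MASTER_IRQ 0x80000000u

    static uint32_t deier = 0x80000001;    /* master enable + one source */
    static uint32_t deiir, gtiir;          /* fake status registers */

    /* Write-1-to-clear semantics for the IIR registers. */
    static void ack(uint32_t *reg, uint32_t bits) { *reg &= ~bits; }

    static int irq_handler(void)
    {
        uint32_t de, gt, saved_ier;
        int ret = IRQ_NONE;

        /* 1. gate the master bit so no new IIR bits latch mid-handler */
        saved_ier = deier;
        deier = saved_ier & ~MASTER_IRQ;

        /* 2. snapshot all the status registers */
        de = deiir;
        gt = gtiir;
        if (de == 0 && gt == 0)
            goto done;                     /* shared line: not ours */

        ret = IRQ_HANDLED;
        /* 3. ...dispatch on the snapshot bits here... */

        /* 4. ack only what we saw; bits raised meanwhile survive */
        ack(&deiir, de);
        ack(&gtiir, gt);
    done:
        /* 5. restore the master enable (posting read in the real code) */
        deier = saved_ier;
        return ret;
    }

    int main(void)
    {
        deiir = 0x1;
        printf("ret=%d deiir=%08x\n", irq_handler(), deiir);
        return 0;
    }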
553 | static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS) | ||
554 | { | ||
555 | struct drm_device *dev = (struct drm_device *) arg; | ||
556 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | ||
557 | int ret = IRQ_NONE; | ||
558 | u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir; | ||
559 | u32 hotplug_mask; | ||
560 | struct drm_i915_master_private *master_priv; | ||
561 | u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT; | ||
562 | |||
563 | atomic_inc(&dev_priv->irq_received); | ||
564 | |||
565 | if (IS_GEN6(dev)) | ||
566 | bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT; | ||
567 | |||
568 | /* disable master interrupt before clearing iir */ | ||
569 | de_ier = I915_READ(DEIER); | ||
570 | I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); | ||
571 | POSTING_READ(DEIER); | ||
572 | |||
573 | de_iir = I915_READ(DEIIR); | ||
574 | gt_iir = I915_READ(GTIIR); | ||
575 | pch_iir = I915_READ(SDEIIR); | ||
576 | pm_iir = I915_READ(GEN6_PMIIR); | ||
577 | |||
578 | if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 && | ||
579 | (!IS_GEN6(dev) || pm_iir == 0)) | ||
580 | goto done; | ||
581 | |||
582 | if (HAS_PCH_CPT(dev)) | ||
583 | hotplug_mask = SDE_HOTPLUG_MASK_CPT; | ||
584 | else | ||
585 | hotplug_mask = SDE_HOTPLUG_MASK; | ||
586 | |||
587 | ret = IRQ_HANDLED; | ||
588 | |||
589 | if (dev->primary->master) { | ||
590 | master_priv = dev->primary->master->driver_priv; | ||
591 | if (master_priv->sarea_priv) | ||
592 | master_priv->sarea_priv->last_dispatch = | ||
593 | READ_BREADCRUMB(dev_priv); | ||
594 | } | ||
595 | |||
596 | if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY)) | ||
597 | notify_ring(dev, &dev_priv->ring[RCS]); | ||
598 | if (gt_iir & bsd_usr_interrupt) | ||
599 | notify_ring(dev, &dev_priv->ring[VCS]); | ||
600 | if (gt_iir & GT_BLT_USER_INTERRUPT) | ||
601 | notify_ring(dev, &dev_priv->ring[BCS]); | ||
348 | 602 | ||
349 | if (de_iir & DE_GSE) | 603 | if (de_iir & DE_GSE) |
350 | ironlake_opregion_gse_intr(dev); | 604 | intel_opregion_gse_intr(dev); |
351 | 605 | ||
352 | if (de_iir & DE_PLANEA_FLIP_DONE) { | 606 | if (de_iir & DE_PLANEA_FLIP_DONE) { |
353 | intel_prepare_page_flip(dev, 0); | 607 | intel_prepare_page_flip(dev, 0); |
354 | intel_finish_page_flip(dev, 0); | 608 | intel_finish_page_flip_plane(dev, 0); |
355 | } | 609 | } |
356 | 610 | ||
357 | if (de_iir & DE_PLANEB_FLIP_DONE) { | 611 | if (de_iir & DE_PLANEB_FLIP_DONE) { |
358 | intel_prepare_page_flip(dev, 1); | 612 | intel_prepare_page_flip(dev, 1); |
359 | intel_finish_page_flip(dev, 1); | 613 | intel_finish_page_flip_plane(dev, 1); |
360 | } | 614 | } |
361 | 615 | ||
362 | if (de_iir & DE_PIPEA_VBLANK) | 616 | if (de_iir & DE_PIPEA_VBLANK) |
@@ -366,9 +620,10 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
366 | drm_handle_vblank(dev, 1); | 620 | drm_handle_vblank(dev, 1); |
367 | 621 | ||
368 | /* check event from PCH */ | 622 | /* check event from PCH */ |
369 | if ((de_iir & DE_PCH_EVENT) && | 623 | if (de_iir & DE_PCH_EVENT) { |
370 | (pch_iir & SDE_HOTPLUG_MASK)) { | 624 | if (pch_iir & hotplug_mask) |
371 | queue_work(dev_priv->wq, &dev_priv->hotplug_work); | 625 | queue_work(dev_priv->wq, &dev_priv->hotplug_work); |
626 | pch_irq_handler(dev); | ||
372 | } | 627 | } |
373 | 628 | ||
374 | if (de_iir & DE_PCU_EVENT) { | 629 | if (de_iir & DE_PCU_EVENT) { |
@@ -376,14 +631,34 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
376 | i915_handle_rps_change(dev); | 631 | i915_handle_rps_change(dev); |
377 | } | 632 | } |
378 | 633 | ||
634 | if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS) { | ||
635 | /* | ||
636 | * IIR bits should never already be set because IMR should | ||
637 | * prevent an interrupt from being shown in IIR. The warning | ||
638 | * displays a case where we've unsafely cleared | ||
639 | * dev_priv->pm_iir. Although missing an interrupt of the same | ||
640 | * type is not a problem, it displays a problem in the logic. | ||
641 | * | ||
642 | * The mask bit in IMR is cleared by rps_work. | ||
643 | */ | ||
644 | unsigned long flags; | ||
645 | spin_lock_irqsave(&dev_priv->rps_lock, flags); | ||
646 | WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n"); | ||
647 | I915_WRITE(GEN6_PMIMR, pm_iir); | ||
648 | dev_priv->pm_iir |= pm_iir; | ||
649 | spin_unlock_irqrestore(&dev_priv->rps_lock, flags); | ||
650 | queue_work(dev_priv->wq, &dev_priv->rps_work); | ||
651 | } | ||
652 | |||
379 | /* should clear PCH hotplug event before clear CPU irq */ | 653 | /* should clear PCH hotplug event before clear CPU irq */ |
380 | I915_WRITE(SDEIIR, pch_iir); | 654 | I915_WRITE(SDEIIR, pch_iir); |
381 | I915_WRITE(GTIIR, gt_iir); | 655 | I915_WRITE(GTIIR, gt_iir); |
382 | I915_WRITE(DEIIR, de_iir); | 656 | I915_WRITE(DEIIR, de_iir); |
657 | I915_WRITE(GEN6_PMIIR, pm_iir); | ||
383 | 658 | ||
384 | done: | 659 | done: |
385 | I915_WRITE(DEIER, de_ier); | 660 | I915_WRITE(DEIER, de_ier); |
386 | (void)I915_READ(DEIER); | 661 | POSTING_READ(DEIER); |
387 | 662 | ||
388 | return ret; | 663 | return ret; |
389 | } | 664 | } |
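The GEN6_PM block inside the handler above is the producer half of a handshake with gen6_pm_rps_work(): under rps_lock the handler masks the new bits in PMIMR, accumulates them into dev_priv->pm_iir and queues the work; the work item later consumes the accumulated bits and clears them from PMIMR. The masking is what makes the WARN's condition, a stashed bit showing up again before the work ran, impossible in correct operation. A single-threaded sketch of the handshake, with the locking elided to keep it short:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t pmimr;          /* mask register: 1 = source masked */
    static uint32_t stashed_iir;    /* dev_priv->pm_iir equivalent      */

    /* Producer (interrupt handler): mask, stash, queue work. */
    static void isr_defer(uint32_t pm_iir)
    {
        assert((stashed_iir & pm_iir) == 0);  /* the WARN in the driver */
        pmimr |= pm_iir;            /* source cannot re-interrupt now   */
        stashed_iir |= pm_iir;
        /* queue_work(...) */
    }

    /* Consumer (work item): take the stash, process, unmask. */
    static void rps_work(void)
    {
        uint32_t pm_iir = stashed_iir;

        stashed_iir = 0;
        /* ...adjust the GPU frequency based on pm_iir... */
        pmimr &= ~pm_iir;           /* let those events fire again      */
    }

    int main(void)
    {
        isr_defer(0x2);             /* a second isr_defer(0x2) before   */
        rps_work();                 /* rps_work() would trip the assert */
        isr_defer(0x2);             /* fine after the work has run      */
        printf("imr=%x stash=%x\n", pmimr, stashed_iir);
        return 0;
    }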
@@ -404,47 +679,38 @@ static void i915_error_work_func(struct work_struct *work)
404 | char *reset_event[] = { "RESET=1", NULL }; | 679 | char *reset_event[] = { "RESET=1", NULL }; |
405 | char *reset_done_event[] = { "ERROR=0", NULL }; | 680 | char *reset_done_event[] = { "ERROR=0", NULL }; |
406 | 681 | ||
407 | DRM_DEBUG_DRIVER("generating error event\n"); | ||
408 | kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); | 682 | kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); |
409 | 683 | ||
410 | if (atomic_read(&dev_priv->mm.wedged)) { | 684 | if (atomic_read(&dev_priv->mm.wedged)) { |
411 | if (IS_I965G(dev)) { | 685 | DRM_DEBUG_DRIVER("resetting chip\n"); |
412 | DRM_DEBUG_DRIVER("resetting chip\n"); | 686 | kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event); |
413 | kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event); | 687 | if (!i915_reset(dev, GRDOM_RENDER)) { |
414 | if (!i965_reset(dev, GDRST_RENDER)) { | 688 | atomic_set(&dev_priv->mm.wedged, 0); |
415 | atomic_set(&dev_priv->mm.wedged, 0); | 689 | kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event); |
416 | kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event); | ||
417 | } | ||
418 | } else { | ||
419 | DRM_DEBUG_DRIVER("reboot required\n"); | ||
420 | } | 690 | } |
691 | complete_all(&dev_priv->error_completion); | ||
421 | } | 692 | } |
422 | } | 693 | } |
423 | 694 | ||
695 | #ifdef CONFIG_DEBUG_FS | ||
424 | static struct drm_i915_error_object * | 696 | static struct drm_i915_error_object * |
425 | i915_error_object_create(struct drm_device *dev, | 697 | i915_error_object_create(struct drm_i915_private *dev_priv, |
426 | struct drm_gem_object *src) | 698 | struct drm_i915_gem_object *src) |
427 | { | 699 | { |
428 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
429 | struct drm_i915_error_object *dst; | 700 | struct drm_i915_error_object *dst; |
430 | struct drm_i915_gem_object *src_priv; | ||
431 | int page, page_count; | 701 | int page, page_count; |
432 | u32 reloc_offset; | 702 | u32 reloc_offset; |
433 | 703 | ||
434 | if (src == NULL) | 704 | if (src == NULL || src->pages == NULL) |
435 | return NULL; | 705 | return NULL; |
436 | 706 | ||
437 | src_priv = to_intel_bo(src); | 707 | page_count = src->base.size / PAGE_SIZE; |
438 | if (src_priv->pages == NULL) | ||
439 | return NULL; | ||
440 | |||
441 | page_count = src->size / PAGE_SIZE; | ||
442 | 708 | ||
443 | dst = kmalloc(sizeof(*dst) + page_count * sizeof (u32 *), GFP_ATOMIC); | 709 | dst = kmalloc(sizeof(*dst) + page_count * sizeof (u32 *), GFP_ATOMIC); |
444 | if (dst == NULL) | 710 | if (dst == NULL) |
445 | return NULL; | 711 | return NULL; |
446 | 712 | ||
447 | reloc_offset = src_priv->gtt_offset; | 713 | reloc_offset = src->gtt_offset; |
448 | for (page = 0; page < page_count; page++) { | 714 | for (page = 0; page < page_count; page++) { |
449 | unsigned long flags; | 715 | unsigned long flags; |
450 | void __iomem *s; | 716 | void __iomem *s; |
@@ -456,10 +722,9 @@ i915_error_object_create(struct drm_device *dev,
456 | 722 | ||
457 | local_irq_save(flags); | 723 | local_irq_save(flags); |
458 | s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, | 724 | s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, |
459 | reloc_offset, | 725 | reloc_offset); |
460 | KM_IRQ0); | ||
461 | memcpy_fromio(d, s, PAGE_SIZE); | 726 | memcpy_fromio(d, s, PAGE_SIZE); |
462 | io_mapping_unmap_atomic(s, KM_IRQ0); | 727 | io_mapping_unmap_atomic(s); |
463 | local_irq_restore(flags); | 728 | local_irq_restore(flags); |
464 | 729 | ||
465 | dst->pages[page] = d; | 730 | dst->pages[page] = d; |
@@ -467,7 +732,7 @@ i915_error_object_create(struct drm_device *dev,
467 | reloc_offset += PAGE_SIZE; | 732 | reloc_offset += PAGE_SIZE; |
468 | } | 733 | } |
469 | dst->page_count = page_count; | 734 | dst->page_count = page_count; |
470 | dst->gtt_offset = src_priv->gtt_offset; | 735 | dst->gtt_offset = src->gtt_offset; |
471 | 736 | ||
472 | return dst; | 737 | return dst; |
473 | 738 | ||
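i915_error_object_create() snapshots a buffer page by page through an atomic GTT mapping: at error-capture time the pages may be about to be reused, so copying them into anonymous kernel memory is the simplest way to freeze their contents. A userspace-shaped model of that page loop; the real code maps each page with io_mapping_map_atomic_wc() and copies with memcpy_fromio(), for which plain memcpy stands in here:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define PAGE_SIZE 4096

    struct error_object {
        int page_count;
        void *pages[];          /* one private copy per source page */
    };

    /* Snapshot `size` bytes at `src` into freshly allocated pages. */
    static struct error_object *snapshot(const void *src, size_t size)
    {
        int page, page_count = size / PAGE_SIZE;
        struct error_object *dst;

        dst = malloc(sizeof(*dst) + page_count * sizeof(void *));
        if (!dst)
            return NULL;

        for (page = 0; page < page_count; page++) {
            void *d = malloc(PAGE_SIZE);

            if (!d)
                goto unwind;
            memcpy(d, (const char *)src + page * PAGE_SIZE, PAGE_SIZE);
            dst->pages[page] = d;
        }
        dst->page_count = page_count;
        return dst;

    unwind:
        while (page--)
            free(dst->pages[page]);
        free(dst);
        return NULL;
    }

    int main(void)
    {
        char buf[2 * PAGE_SIZE] = "hello";
        struct error_object *obj = snapshot(buf, sizeof(buf));

        printf("%d pages, first byte '%c'\n", obj->page_count,
               ((char *)obj->pages[0])[0]);
        return 0;
    }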
@@ -496,61 +761,111 @@ static void
496 | i915_error_state_free(struct drm_device *dev, | 761 | i915_error_state_free(struct drm_device *dev, |
497 | struct drm_i915_error_state *error) | 762 | struct drm_i915_error_state *error) |
498 | { | 763 | { |
499 | i915_error_object_free(error->batchbuffer[0]); | 764 | int i; |
500 | i915_error_object_free(error->batchbuffer[1]); | 765 | |
501 | i915_error_object_free(error->ringbuffer); | 766 | for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++) |
767 | i915_error_object_free(error->batchbuffer[i]); | ||
768 | |||
769 | for (i = 0; i < ARRAY_SIZE(error->ringbuffer); i++) | ||
770 | i915_error_object_free(error->ringbuffer[i]); | ||
771 | |||
502 | kfree(error->active_bo); | 772 | kfree(error->active_bo); |
503 | kfree(error->overlay); | 773 | kfree(error->overlay); |
504 | kfree(error); | 774 | kfree(error); |
505 | } | 775 | } |
506 | 776 | ||
507 | static u32 | 777 | static u32 capture_bo_list(struct drm_i915_error_buffer *err, |
508 | i915_get_bbaddr(struct drm_device *dev, u32 *ring) | 778 | int count, |
779 | struct list_head *head) | ||
509 | { | 780 | { |
510 | u32 cmd; | 781 | struct drm_i915_gem_object *obj; |
782 | int i = 0; | ||
783 | |||
784 | list_for_each_entry(obj, head, mm_list) { | ||
785 | err->size = obj->base.size; | ||
786 | err->name = obj->base.name; | ||
787 | err->seqno = obj->last_rendering_seqno; | ||
788 | err->gtt_offset = obj->gtt_offset; | ||
789 | err->read_domains = obj->base.read_domains; | ||
790 | err->write_domain = obj->base.write_domain; | ||
791 | err->fence_reg = obj->fence_reg; | ||
792 | err->pinned = 0; | ||
793 | if (obj->pin_count > 0) | ||
794 | err->pinned = 1; | ||
795 | if (obj->user_pin_count > 0) | ||
796 | err->pinned = -1; | ||
797 | err->tiling = obj->tiling_mode; | ||
798 | err->dirty = obj->dirty; | ||
799 | err->purgeable = obj->madv != I915_MADV_WILLNEED; | ||
800 | err->ring = obj->ring ? obj->ring->id : 0; | ||
801 | err->cache_level = obj->cache_level; | ||
802 | |||
803 | if (++i == count) | ||
804 | break; | ||
511 | 805 | ||
512 | if (IS_I830(dev) || IS_845G(dev)) | 806 | err++; |
513 | cmd = MI_BATCH_BUFFER; | 807 | } |
514 | else if (IS_I965G(dev)) | ||
515 | cmd = (MI_BATCH_BUFFER_START | (2 << 6) | | ||
516 | MI_BATCH_NON_SECURE_I965); | ||
517 | else | ||
518 | cmd = (MI_BATCH_BUFFER_START | (2 << 6)); | ||
519 | 808 | ||
520 | return ring[0] == cmd ? ring[1] : 0; | 809 | return i; |
521 | } | 810 | } |
522 | 811 | ||
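capture_bo_list() above fills a preallocated array from a kernel list, stopping at the array's capacity, and reports how many entries it actually wrote; callers must trust the return value rather than the capacity, since the list can have shrunk between the sizing pass and the copy. The shape of that helper as a standalone sketch, with the struct fields reduced to one invented value:

    #include <stdio.h>

    struct node   { int value; struct node *next; };
    struct record { int value; };

    /* Copy up to `count` list entries into err[]; return how many were
     * actually captured (the list may be shorter than expected). */
    static int capture_list(struct record *err, int count, struct node *head)
    {
        struct node *n;
        int i = 0;

        for (n = head; n && i < count; n = n->next)
            err[i++].value = n->value;
        return i;
    }

    int main(void)
    {
        struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
        struct record rec[8];
        int n = capture_list(rec, 8, &a);

        printf("captured %d, last=%d\n", n, rec[n - 1].value);
        return 0;
    }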
523 | static u32 | 812 | static void i915_gem_record_fences(struct drm_device *dev, |
524 | i915_ringbuffer_last_batch(struct drm_device *dev) | 813 | struct drm_i915_error_state *error) |
525 | { | 814 | { |
526 | struct drm_i915_private *dev_priv = dev->dev_private; | 815 | struct drm_i915_private *dev_priv = dev->dev_private; |
527 | u32 head, bbaddr; | 816 | int i; |
528 | u32 *ring; | 817 | |
529 | 818 | /* Fences */ | |
530 | /* Locate the current position in the ringbuffer and walk back | 819 | switch (INTEL_INFO(dev)->gen) { |
531 | * to find the most recently dispatched batch buffer. | 820 | case 6: |
532 | */ | 821 | for (i = 0; i < 16; i++) |
533 | bbaddr = 0; | 822 | error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); |
534 | head = I915_READ(PRB0_HEAD) & HEAD_ADDR; | 823 | break; |
535 | ring = (u32 *)(dev_priv->render_ring.virtual_start + head); | 824 | case 5: |
825 | case 4: | ||
826 | for (i = 0; i < 16; i++) | ||
827 | error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); | ||
828 | break; | ||
829 | case 3: | ||
830 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) | ||
831 | for (i = 0; i < 8; i++) | ||
832 | error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); | ||
833 | case 2: | ||
834 | for (i = 0; i < 8; i++) | ||
835 | error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); | ||
836 | break; | ||
536 | 837 | ||
537 | while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) { | ||
538 | bbaddr = i915_get_bbaddr(dev, ring); | ||
539 | if (bbaddr) | ||
540 | break; | ||
541 | } | 838 | } |
839 | } | ||
542 | 840 | ||
543 | if (bbaddr == 0) { | 841 | static struct drm_i915_error_object * |
544 | ring = (u32 *)(dev_priv->render_ring.virtual_start | 842 | i915_error_first_batchbuffer(struct drm_i915_private *dev_priv, |
545 | + dev_priv->render_ring.size); | 843 | struct intel_ring_buffer *ring) |
546 | while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) { | 844 | { |
547 | bbaddr = i915_get_bbaddr(dev, ring); | 845 | struct drm_i915_gem_object *obj; |
548 | if (bbaddr) | 846 | u32 seqno; |
549 | break; | 847 | |
550 | } | 848 | if (!ring->get_seqno) |
849 | return NULL; | ||
850 | |||
851 | seqno = ring->get_seqno(ring); | ||
852 | list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { | ||
853 | if (obj->ring != ring) | ||
854 | continue; | ||
855 | |||
856 | if (i915_seqno_passed(seqno, obj->last_rendering_seqno)) | ||
857 | continue; | ||
858 | |||
859 | if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0) | ||
860 | continue; | ||
861 | |||
862 | /* We need to copy these to an anonymous buffer as the simplest | ||
863 | * method to avoid being overwritten by userspace. | ||
864 | */ | ||
865 | return i915_error_object_create(dev_priv, obj); | ||
551 | } | 866 | } |
552 | 867 | ||
553 | return bbaddr; | 868 | return NULL; |
554 | } | 869 | } |
555 | 870 | ||
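i915_error_first_batchbuffer() picks the first object on the active list that the ring has not yet finished with (its last_rendering_seqno has not been passed) and that was read through the command domain, i.e. the batch most likely to contain the hang. The "has been passed" test relies on i915_seqno_passed(), the standard wraparound-safe sequence comparison: subtract as unsigned, reinterpret as signed, check the sign. A self-contained sketch of that comparison:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Wraparound-safe "has seqno a reached b?": valid as long as the two
     * values are within 2^31 of each other. This mirrors the comparison
     * behind i915_seqno_passed(). */
    static bool seqno_passed(uint32_t a, uint32_t b)
    {
        return (int32_t)(a - b) >= 0;
    }

    int main(void)
    {
        printf("%d\n", seqno_passed(10, 5));          /* 1 */
        printf("%d\n", seqno_passed(5, 10));          /* 0 */
        /* still correct across the 32-bit wrap: */
        printf("%d\n", seqno_passed(3, 0xfffffffe));  /* 1 */
        return 0;
    }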
556 | /** | 871 | /** |
@@ -565,12 +880,10 @@ i915_ringbuffer_last_batch(struct drm_device *dev)
565 | static void i915_capture_error_state(struct drm_device *dev) | 880 | static void i915_capture_error_state(struct drm_device *dev) |
566 | { | 881 | { |
567 | struct drm_i915_private *dev_priv = dev->dev_private; | 882 | struct drm_i915_private *dev_priv = dev->dev_private; |
568 | struct drm_i915_gem_object *obj_priv; | 883 | struct drm_i915_gem_object *obj; |
569 | struct drm_i915_error_state *error; | 884 | struct drm_i915_error_state *error; |
570 | struct drm_gem_object *batchbuffer[2]; | ||
571 | unsigned long flags; | 885 | unsigned long flags; |
572 | u32 bbaddr; | 886 | int i, pipe; |
573 | int count; | ||
574 | 887 | ||
575 | spin_lock_irqsave(&dev_priv->error_lock, flags); | 888 | spin_lock_irqsave(&dev_priv->error_lock, flags); |
576 | error = dev_priv->first_error; | 889 | error = dev_priv->first_error; |
@@ -578,25 +891,43 @@ static void i915_capture_error_state(struct drm_device *dev)
578 | if (error) | 891 | if (error) |
579 | return; | 892 | return; |
580 | 893 | ||
894 | /* Account for pipe specific data like PIPE*STAT */ | ||
581 | error = kmalloc(sizeof(*error), GFP_ATOMIC); | 895 | error = kmalloc(sizeof(*error), GFP_ATOMIC); |
582 | if (!error) { | 896 | if (!error) { |
583 | DRM_DEBUG_DRIVER("out of memory, not capturing error state\n"); | 897 | DRM_DEBUG_DRIVER("out of memory, not capturing error state\n"); |
584 | return; | 898 | return; |
585 | } | 899 | } |
586 | 900 | ||
587 | error->seqno = i915_get_gem_seqno(dev, &dev_priv->render_ring); | 901 | DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n", |
902 | dev->primary->index); | ||
903 | |||
904 | error->seqno = dev_priv->ring[RCS].get_seqno(&dev_priv->ring[RCS]); | ||
588 | error->eir = I915_READ(EIR); | 905 | error->eir = I915_READ(EIR); |
589 | error->pgtbl_er = I915_READ(PGTBL_ER); | 906 | error->pgtbl_er = I915_READ(PGTBL_ER); |
590 | error->pipeastat = I915_READ(PIPEASTAT); | 907 | for_each_pipe(pipe) |
591 | error->pipebstat = I915_READ(PIPEBSTAT); | 908 | error->pipestat[pipe] = I915_READ(PIPESTAT(pipe)); |
592 | error->instpm = I915_READ(INSTPM); | 909 | error->instpm = I915_READ(INSTPM); |
593 | if (!IS_I965G(dev)) { | 910 | error->error = 0; |
594 | error->ipeir = I915_READ(IPEIR); | 911 | if (INTEL_INFO(dev)->gen >= 6) { |
595 | error->ipehr = I915_READ(IPEHR); | 912 | error->error = I915_READ(ERROR_GEN6); |
596 | error->instdone = I915_READ(INSTDONE); | 913 | |
597 | error->acthd = I915_READ(ACTHD); | 914 | error->bcs_acthd = I915_READ(BCS_ACTHD); |
598 | error->bbaddr = 0; | 915 | error->bcs_ipehr = I915_READ(BCS_IPEHR); |
599 | } else { | 916 | error->bcs_ipeir = I915_READ(BCS_IPEIR); |
917 | error->bcs_instdone = I915_READ(BCS_INSTDONE); | ||
918 | error->bcs_seqno = 0; | ||
919 | if (dev_priv->ring[BCS].get_seqno) | ||
920 | error->bcs_seqno = dev_priv->ring[BCS].get_seqno(&dev_priv->ring[BCS]); | ||
921 | |||
922 | error->vcs_acthd = I915_READ(VCS_ACTHD); | ||
923 | error->vcs_ipehr = I915_READ(VCS_IPEHR); | ||
924 | error->vcs_ipeir = I915_READ(VCS_IPEIR); | ||
925 | error->vcs_instdone = I915_READ(VCS_INSTDONE); | ||
926 | error->vcs_seqno = 0; | ||
927 | if (dev_priv->ring[VCS].get_seqno) | ||
928 | error->vcs_seqno = dev_priv->ring[VCS].get_seqno(&dev_priv->ring[VCS]); | ||
929 | } | ||
930 | if (INTEL_INFO(dev)->gen >= 4) { | ||
600 | error->ipeir = I915_READ(IPEIR_I965); | 931 | error->ipeir = I915_READ(IPEIR_I965); |
601 | error->ipehr = I915_READ(IPEHR_I965); | 932 | error->ipehr = I915_READ(IPEHR_I965); |
602 | error->instdone = I915_READ(INSTDONE_I965); | 933 | error->instdone = I915_READ(INSTDONE_I965); |
@@ -604,121 +935,64 @@ static void i915_capture_error_state(struct drm_device *dev)
604 | error->instdone1 = I915_READ(INSTDONE1); | 935 | error->instdone1 = I915_READ(INSTDONE1); |
605 | error->acthd = I915_READ(ACTHD_I965); | 936 | error->acthd = I915_READ(ACTHD_I965); |
606 | error->bbaddr = I915_READ64(BB_ADDR); | 937 | error->bbaddr = I915_READ64(BB_ADDR); |
938 | } else { | ||
939 | error->ipeir = I915_READ(IPEIR); | ||
940 | error->ipehr = I915_READ(IPEHR); | ||
941 | error->instdone = I915_READ(INSTDONE); | ||
942 | error->acthd = I915_READ(ACTHD); | ||
943 | error->bbaddr = 0; | ||
607 | } | 944 | } |
945 | i915_gem_record_fences(dev, error); | ||
608 | 946 | ||
609 | bbaddr = i915_ringbuffer_last_batch(dev); | 947 | /* Record the active batch and ring buffers */ |
610 | 948 | for (i = 0; i < I915_NUM_RINGS; i++) { | |
611 | /* Grab the current batchbuffer, most likely to have crashed. */ | 949 | error->batchbuffer[i] = |
612 | batchbuffer[0] = NULL; | 950 | i915_error_first_batchbuffer(dev_priv, |
613 | batchbuffer[1] = NULL; | 951 | &dev_priv->ring[i]); |
614 | count = 0; | ||
615 | list_for_each_entry(obj_priv, | ||
616 | &dev_priv->render_ring.active_list, list) { | ||
617 | |||
618 | struct drm_gem_object *obj = &obj_priv->base; | ||
619 | |||
620 | if (batchbuffer[0] == NULL && | ||
621 | bbaddr >= obj_priv->gtt_offset && | ||
622 | bbaddr < obj_priv->gtt_offset + obj->size) | ||
623 | batchbuffer[0] = obj; | ||
624 | |||
625 | if (batchbuffer[1] == NULL && | ||
626 | error->acthd >= obj_priv->gtt_offset && | ||
627 | error->acthd < obj_priv->gtt_offset + obj->size) | ||
628 | batchbuffer[1] = obj; | ||
629 | |||
630 | count++; | ||
631 | } | ||
632 | /* Scan the other lists for completeness for those bizarre errors. */ | ||
633 | if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) { | ||
634 | list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) { | ||
635 | struct drm_gem_object *obj = &obj_priv->base; | ||
636 | |||
637 | if (batchbuffer[0] == NULL && | ||
638 | bbaddr >= obj_priv->gtt_offset && | ||
639 | bbaddr < obj_priv->gtt_offset + obj->size) | ||
640 | batchbuffer[0] = obj; | ||
641 | |||
642 | if (batchbuffer[1] == NULL && | ||
643 | error->acthd >= obj_priv->gtt_offset && | ||
644 | error->acthd < obj_priv->gtt_offset + obj->size) | ||
645 | batchbuffer[1] = obj; | ||
646 | |||
647 | if (batchbuffer[0] && batchbuffer[1]) | ||
648 | break; | ||
649 | } | ||
650 | } | ||
651 | if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) { | ||
652 | list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) { | ||
653 | struct drm_gem_object *obj = &obj_priv->base; | ||
654 | |||
655 | if (batchbuffer[0] == NULL && | ||
656 | bbaddr >= obj_priv->gtt_offset && | ||
657 | bbaddr < obj_priv->gtt_offset + obj->size) | ||
658 | batchbuffer[0] = obj; | ||
659 | |||
660 | if (batchbuffer[1] == NULL && | ||
661 | error->acthd >= obj_priv->gtt_offset && | ||
662 | error->acthd < obj_priv->gtt_offset + obj->size) | ||
663 | batchbuffer[1] = obj; | ||
664 | 952 | ||
665 | if (batchbuffer[0] && batchbuffer[1]) | 953 | error->ringbuffer[i] = |
666 | break; | 954 | i915_error_object_create(dev_priv, |
667 | } | 955 | dev_priv->ring[i].obj); |
668 | } | 956 | } |
669 | 957 | ||
670 | /* We need to copy these to an anonymous buffer as the simplest | 958 | /* Record buffers on the active and pinned lists. */ |
671 | * method to avoid being overwritten by userpace. | 959 | error->active_bo = NULL; |
672 | */ | 960 | error->pinned_bo = NULL; |
673 | error->batchbuffer[0] = i915_error_object_create(dev, batchbuffer[0]); | ||
674 | if (batchbuffer[1] != batchbuffer[0]) | ||
675 | error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]); | ||
676 | else | ||
677 | error->batchbuffer[1] = NULL; | ||
678 | 961 | ||
679 | /* Record the ringbuffer */ | 962 | i = 0; |
680 | error->ringbuffer = i915_error_object_create(dev, | 963 | list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) |
681 | dev_priv->render_ring.gem_object); | 964 | i++; |
965 | error->active_bo_count = i; | ||
966 | list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list) | ||
967 | i++; | ||
968 | error->pinned_bo_count = i - error->active_bo_count; | ||
682 | 969 | ||
683 | /* Record buffers on the active list. */ | ||
684 | error->active_bo = NULL; | 970 | error->active_bo = NULL; |
685 | error->active_bo_count = 0; | 971 | error->pinned_bo = NULL; |
686 | 972 | if (i) { | |
687 | if (count) | 973 | error->active_bo = kmalloc(sizeof(*error->active_bo)*i, |
688 | error->active_bo = kmalloc(sizeof(*error->active_bo)*count, | ||
689 | GFP_ATOMIC); | 974 | GFP_ATOMIC); |
690 | 975 | if (error->active_bo) | |
691 | if (error->active_bo) { | 976 | error->pinned_bo = |
692 | int i = 0; | 977 | error->active_bo + error->active_bo_count; |
693 | list_for_each_entry(obj_priv, | ||
694 | &dev_priv->render_ring.active_list, list) { | ||
695 | struct drm_gem_object *obj = &obj_priv->base; | ||
696 | |||
697 | error->active_bo[i].size = obj->size; | ||
698 | error->active_bo[i].name = obj->name; | ||
699 | error->active_bo[i].seqno = obj_priv->last_rendering_seqno; | ||
700 | error->active_bo[i].gtt_offset = obj_priv->gtt_offset; | ||
701 | error->active_bo[i].read_domains = obj->read_domains; | ||
702 | error->active_bo[i].write_domain = obj->write_domain; | ||
703 | error->active_bo[i].fence_reg = obj_priv->fence_reg; | ||
704 | error->active_bo[i].pinned = 0; | ||
705 | if (obj_priv->pin_count > 0) | ||
706 | error->active_bo[i].pinned = 1; | ||
707 | if (obj_priv->user_pin_count > 0) | ||
708 | error->active_bo[i].pinned = -1; | ||
709 | error->active_bo[i].tiling = obj_priv->tiling_mode; | ||
710 | error->active_bo[i].dirty = obj_priv->dirty; | ||
711 | error->active_bo[i].purgeable = obj_priv->madv != I915_MADV_WILLNEED; | ||
712 | |||
713 | if (++i == count) | ||
714 | break; | ||
715 | } | ||
716 | error->active_bo_count = i; | ||
717 | } | 978 | } |
718 | 979 | ||
980 | if (error->active_bo) | ||
981 | error->active_bo_count = | ||
982 | capture_bo_list(error->active_bo, | ||
983 | error->active_bo_count, | ||
984 | &dev_priv->mm.active_list); | ||
985 | |||
986 | if (error->pinned_bo) | ||
987 | error->pinned_bo_count = | ||
988 | capture_bo_list(error->pinned_bo, | ||
989 | error->pinned_bo_count, | ||
990 | &dev_priv->mm.pinned_list); | ||
991 | |||
719 | do_gettimeofday(&error->time); | 992 | do_gettimeofday(&error->time); |
720 | 993 | ||
721 | error->overlay = intel_overlay_capture_error_state(dev); | 994 | error->overlay = intel_overlay_capture_error_state(dev); |
995 | error->display = intel_display_capture_error_state(dev); | ||
722 | 996 | ||
723 | spin_lock_irqsave(&dev_priv->error_lock, flags); | 997 | spin_lock_irqsave(&dev_priv->error_lock, flags); |
724 | if (dev_priv->first_error == NULL) { | 998 | if (dev_priv->first_error == NULL) { |
@@ -744,11 +1018,15 @@ void i915_destroy_error_state(struct drm_device *dev)
744 | if (error) | 1018 | if (error) |
745 | i915_error_state_free(dev, error); | 1019 | i915_error_state_free(dev, error); |
746 | } | 1020 | } |
1021 | #else | ||
1022 | #define i915_capture_error_state(x) | ||
1023 | #endif | ||
747 | 1024 | ||
748 | static void i915_report_and_clear_eir(struct drm_device *dev) | 1025 | static void i915_report_and_clear_eir(struct drm_device *dev) |
749 | { | 1026 | { |
750 | struct drm_i915_private *dev_priv = dev->dev_private; | 1027 | struct drm_i915_private *dev_priv = dev->dev_private; |
751 | u32 eir = I915_READ(EIR); | 1028 | u32 eir = I915_READ(EIR); |
1029 | int pipe; | ||
752 | 1030 | ||
753 | if (!eir) | 1031 | if (!eir) |
754 | return; | 1032 | return; |
@@ -773,7 +1051,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
773 | printk(KERN_ERR " ACTHD: 0x%08x\n", | 1051 | printk(KERN_ERR " ACTHD: 0x%08x\n", |
774 | I915_READ(ACTHD_I965)); | 1052 | I915_READ(ACTHD_I965)); |
775 | I915_WRITE(IPEIR_I965, ipeir); | 1053 | I915_WRITE(IPEIR_I965, ipeir); |
776 | (void)I915_READ(IPEIR_I965); | 1054 | POSTING_READ(IPEIR_I965); |
777 | } | 1055 | } |
778 | if (eir & GM45_ERROR_PAGE_TABLE) { | 1056 | if (eir & GM45_ERROR_PAGE_TABLE) { |
779 | u32 pgtbl_err = I915_READ(PGTBL_ER); | 1057 | u32 pgtbl_err = I915_READ(PGTBL_ER); |
@@ -781,37 +1059,33 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
781 | printk(KERN_ERR " PGTBL_ER: 0x%08x\n", | 1059 | printk(KERN_ERR " PGTBL_ER: 0x%08x\n", |
782 | pgtbl_err); | 1060 | pgtbl_err); |
783 | I915_WRITE(PGTBL_ER, pgtbl_err); | 1061 | I915_WRITE(PGTBL_ER, pgtbl_err); |
784 | (void)I915_READ(PGTBL_ER); | 1062 | POSTING_READ(PGTBL_ER); |
785 | } | 1063 | } |
786 | } | 1064 | } |
787 | 1065 | ||
788 | if (IS_I9XX(dev)) { | 1066 | if (!IS_GEN2(dev)) { |
789 | if (eir & I915_ERROR_PAGE_TABLE) { | 1067 | if (eir & I915_ERROR_PAGE_TABLE) { |
790 | u32 pgtbl_err = I915_READ(PGTBL_ER); | 1068 | u32 pgtbl_err = I915_READ(PGTBL_ER); |
791 | printk(KERN_ERR "page table error\n"); | 1069 | printk(KERN_ERR "page table error\n"); |
792 | printk(KERN_ERR " PGTBL_ER: 0x%08x\n", | 1070 | printk(KERN_ERR " PGTBL_ER: 0x%08x\n", |
793 | pgtbl_err); | 1071 | pgtbl_err); |
794 | I915_WRITE(PGTBL_ER, pgtbl_err); | 1072 | I915_WRITE(PGTBL_ER, pgtbl_err); |
795 | (void)I915_READ(PGTBL_ER); | 1073 | POSTING_READ(PGTBL_ER); |
796 | } | 1074 | } |
797 | } | 1075 | } |
798 | 1076 | ||
799 | if (eir & I915_ERROR_MEMORY_REFRESH) { | 1077 | if (eir & I915_ERROR_MEMORY_REFRESH) { |
800 | u32 pipea_stats = I915_READ(PIPEASTAT); | 1078 | printk(KERN_ERR "memory refresh error:\n"); |
801 | u32 pipeb_stats = I915_READ(PIPEBSTAT); | 1079 | for_each_pipe(pipe) |
802 | 1080 | printk(KERN_ERR "pipe %c stat: 0x%08x\n", | |
803 | printk(KERN_ERR "memory refresh error\n"); | 1081 | pipe_name(pipe), I915_READ(PIPESTAT(pipe))); |
804 | printk(KERN_ERR "PIPEASTAT: 0x%08x\n", | ||
805 | pipea_stats); | ||
806 | printk(KERN_ERR "PIPEBSTAT: 0x%08x\n", | ||
807 | pipeb_stats); | ||
808 | /* pipestat has already been acked */ | 1082 | /* pipestat has already been acked */ |
809 | } | 1083 | } |
810 | if (eir & I915_ERROR_INSTRUCTION) { | 1084 | if (eir & I915_ERROR_INSTRUCTION) { |
811 | printk(KERN_ERR "instruction error\n"); | 1085 | printk(KERN_ERR "instruction error\n"); |
812 | printk(KERN_ERR " INSTPM: 0x%08x\n", | 1086 | printk(KERN_ERR " INSTPM: 0x%08x\n", |
813 | I915_READ(INSTPM)); | 1087 | I915_READ(INSTPM)); |
814 | if (!IS_I965G(dev)) { | 1088 | if (INTEL_INFO(dev)->gen < 4) { |
815 | u32 ipeir = I915_READ(IPEIR); | 1089 | u32 ipeir = I915_READ(IPEIR); |
816 | 1090 | ||
817 | printk(KERN_ERR " IPEIR: 0x%08x\n", | 1091 | printk(KERN_ERR " IPEIR: 0x%08x\n", |
@@ -823,7 +1097,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
823 | printk(KERN_ERR " ACTHD: 0x%08x\n", | 1097 | printk(KERN_ERR " ACTHD: 0x%08x\n", |
824 | I915_READ(ACTHD)); | 1098 | I915_READ(ACTHD)); |
825 | I915_WRITE(IPEIR, ipeir); | 1099 | I915_WRITE(IPEIR, ipeir); |
826 | (void)I915_READ(IPEIR); | 1100 | POSTING_READ(IPEIR); |
827 | } else { | 1101 | } else { |
828 | u32 ipeir = I915_READ(IPEIR_I965); | 1102 | u32 ipeir = I915_READ(IPEIR_I965); |
829 | 1103 | ||
@@ -840,12 +1114,12 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
840 | printk(KERN_ERR " ACTHD: 0x%08x\n", | 1114 | printk(KERN_ERR " ACTHD: 0x%08x\n", |
841 | I915_READ(ACTHD_I965)); | 1115 | I915_READ(ACTHD_I965)); |
842 | I915_WRITE(IPEIR_I965, ipeir); | 1116 | I915_WRITE(IPEIR_I965, ipeir); |
843 | (void)I915_READ(IPEIR_I965); | 1117 | POSTING_READ(IPEIR_I965); |
844 | } | 1118 | } |
845 | } | 1119 | } |
846 | 1120 | ||
847 | I915_WRITE(EIR, eir); | 1121 | I915_WRITE(EIR, eir); |
848 | (void)I915_READ(EIR); | 1122 | POSTING_READ(EIR); |
849 | eir = I915_READ(EIR); | 1123 | eir = I915_READ(EIR); |
850 | if (eir) { | 1124 | if (eir) { |
851 | /* | 1125 | /* |
@@ -868,7 +1142,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
868 | * so userspace knows something bad happened (should trigger collection | 1142 | * so userspace knows something bad happened (should trigger collection |
869 | * of a ring dump etc.). | 1143 | * of a ring dump etc.). |
870 | */ | 1144 | */ |
871 | static void i915_handle_error(struct drm_device *dev, bool wedged) | 1145 | void i915_handle_error(struct drm_device *dev, bool wedged) |
872 | { | 1146 | { |
873 | struct drm_i915_private *dev_priv = dev->dev_private; | 1147 | struct drm_i915_private *dev_priv = dev->dev_private; |
874 | 1148 | ||
@@ -876,12 +1150,17 @@ static void i915_handle_error(struct drm_device *dev, bool wedged)
876 | i915_report_and_clear_eir(dev); | 1150 | i915_report_and_clear_eir(dev); |
877 | 1151 | ||
878 | if (wedged) { | 1152 | if (wedged) { |
1153 | INIT_COMPLETION(dev_priv->error_completion); | ||
879 | atomic_set(&dev_priv->mm.wedged, 1); | 1154 | atomic_set(&dev_priv->mm.wedged, 1); |
880 | 1155 | ||
881 | /* | 1156 | /* |
882 | * Wakeup waiting processes so they don't hang | 1157 | * Wakeup waiting processes so they don't hang |
883 | */ | 1158 | */ |
884 | DRM_WAKEUP(&dev_priv->render_ring.irq_queue); | 1159 | wake_up_all(&dev_priv->ring[RCS].irq_queue); |
1160 | if (HAS_BSD(dev)) | ||
1161 | wake_up_all(&dev_priv->ring[VCS].irq_queue); | ||
1162 | if (HAS_BLT(dev)) | ||
1163 | wake_up_all(&dev_priv->ring[BCS].irq_queue); | ||
885 | } | 1164 | } |
886 | 1165 | ||
887 | queue_work(dev_priv->wq, &dev_priv->error_work); | 1166 | queue_work(dev_priv->wq, &dev_priv->error_work); |
@@ -892,7 +1171,7 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
892 | drm_i915_private_t *dev_priv = dev->dev_private; | 1171 | drm_i915_private_t *dev_priv = dev->dev_private; |
893 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; | 1172 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; |
894 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 1173 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
895 | struct drm_i915_gem_object *obj_priv; | 1174 | struct drm_i915_gem_object *obj; |
896 | struct intel_unpin_work *work; | 1175 | struct intel_unpin_work *work; |
897 | unsigned long flags; | 1176 | unsigned long flags; |
898 | bool stall_detected; | 1177 | bool stall_detected; |
@@ -911,13 +1190,13 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
911 | } | 1190 | } |
912 | 1191 | ||
913 | /* Potential stall - if we see that the flip has happened, assume a missed interrupt */ | 1192 | /* Potential stall - if we see that the flip has happened, assume a missed interrupt */ |
914 | obj_priv = to_intel_bo(work->pending_flip_obj); | 1193 | obj = work->pending_flip_obj; |
915 | if(IS_I965G(dev)) { | 1194 | if (INTEL_INFO(dev)->gen >= 4) { |
916 | int dspsurf = intel_crtc->plane == 0 ? DSPASURF : DSPBSURF; | 1195 | int dspsurf = DSPSURF(intel_crtc->plane); |
917 | stall_detected = I915_READ(dspsurf) == obj_priv->gtt_offset; | 1196 | stall_detected = I915_READ(dspsurf) == obj->gtt_offset; |
918 | } else { | 1197 | } else { |
919 | int dspaddr = intel_crtc->plane == 0 ? DSPAADDR : DSPBADDR; | 1198 | int dspaddr = DSPADDR(intel_crtc->plane); |
920 | stall_detected = I915_READ(dspaddr) == (obj_priv->gtt_offset + | 1199 | stall_detected = I915_READ(dspaddr) == (obj->gtt_offset + |
921 | crtc->y * crtc->fb->pitch + | 1200 | crtc->y * crtc->fb->pitch + |
922 | crtc->x * crtc->fb->bits_per_pixel/8); | 1201 | crtc->x * crtc->fb->bits_per_pixel/8); |
923 | } | 1202 | } |
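The two branches compare the plane's programmed address against where the pending flip should scan out: on gen4+ DSPSURF holds the surface base directly, while older parts program DSPADDR with the base plus the byte offset of the (x, y) panning origin. A self-contained restatement of the legacy arithmetic, assuming a linear framebuffer:

#include <stdint.h>

/* Expected pre-gen4 scanout address: object start in the GTT plus
 * the byte offset of pixel (x, y). Dividing bits_per_pixel by 8
 * assumes whole-byte pixels, as the driver does. */
static uint32_t expected_dspaddr(uint32_t gtt_offset, uint32_t pitch,
				 uint32_t bits_per_pixel,
				 uint32_t x, uint32_t y)
{
	return gtt_offset + y * pitch + x * bits_per_pixel / 8;
}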
@@ -930,28 +1209,25 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe) | |||
930 | } | 1209 | } |
931 | } | 1210 | } |
932 | 1211 | ||
933 | irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) | 1212 | static irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) |
934 | { | 1213 | { |
935 | struct drm_device *dev = (struct drm_device *) arg; | 1214 | struct drm_device *dev = (struct drm_device *) arg; |
936 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 1215 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
937 | struct drm_i915_master_private *master_priv; | 1216 | struct drm_i915_master_private *master_priv; |
938 | u32 iir, new_iir; | 1217 | u32 iir, new_iir; |
939 | u32 pipea_stats, pipeb_stats; | 1218 | u32 pipe_stats[I915_MAX_PIPES]; |
940 | u32 vblank_status; | 1219 | u32 vblank_status; |
941 | int vblank = 0; | 1220 | int vblank = 0; |
942 | unsigned long irqflags; | 1221 | unsigned long irqflags; |
943 | int irq_received; | 1222 | int irq_received; |
944 | int ret = IRQ_NONE; | 1223 | int ret = IRQ_NONE, pipe; |
945 | struct intel_ring_buffer *render_ring = &dev_priv->render_ring; | 1224 | bool blc_event = false; |
946 | 1225 | ||
947 | atomic_inc(&dev_priv->irq_received); | 1226 | atomic_inc(&dev_priv->irq_received); |
948 | 1227 | ||
949 | if (HAS_PCH_SPLIT(dev)) | ||
950 | return ironlake_irq_handler(dev); | ||
951 | |||
952 | iir = I915_READ(IIR); | 1228 | iir = I915_READ(IIR); |
953 | 1229 | ||
954 | if (IS_I965G(dev)) | 1230 | if (INTEL_INFO(dev)->gen >= 4) |
955 | vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS; | 1231 | vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS; |
956 | else | 1232 | else |
957 | vblank_status = PIPE_VBLANK_INTERRUPT_STATUS; | 1233 | vblank_status = PIPE_VBLANK_INTERRUPT_STATUS; |
@@ -964,30 +1240,26 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) | |||
964 | * It doesn't set the bit in iir again, but it still produces | 1240 | * It doesn't set the bit in iir again, but it still produces |
965 | * interrupts (for non-MSI). | 1241 | * interrupts (for non-MSI). |
966 | */ | 1242 | */ |
967 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); | 1243 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
968 | pipea_stats = I915_READ(PIPEASTAT); | ||
969 | pipeb_stats = I915_READ(PIPEBSTAT); | ||
970 | |||
971 | if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) | 1244 | if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) |
972 | i915_handle_error(dev, false); | 1245 | i915_handle_error(dev, false); |
973 | 1246 | ||
974 | /* | 1247 | for_each_pipe(pipe) { |
975 | * Clear the PIPE(A|B)STAT regs before the IIR | 1248 | int reg = PIPESTAT(pipe); |
976 | */ | 1249 | pipe_stats[pipe] = I915_READ(reg); |
977 | if (pipea_stats & 0x8000ffff) { | 1250 | |
978 | if (pipea_stats & PIPE_FIFO_UNDERRUN_STATUS) | 1251 | /* |
979 | DRM_DEBUG_DRIVER("pipe a underrun\n"); | 1252 | * Clear the PIPE*STAT regs before the IIR |
980 | I915_WRITE(PIPEASTAT, pipea_stats); | 1253 | */ |
981 | irq_received = 1; | 1254 | if (pipe_stats[pipe] & 0x8000ffff) { |
982 | } | 1255 | if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) |
983 | 1256 | DRM_DEBUG_DRIVER("pipe %c underrun\n", | |
984 | if (pipeb_stats & 0x8000ffff) { | 1257 | pipe_name(pipe)); |
985 | if (pipeb_stats & PIPE_FIFO_UNDERRUN_STATUS) | 1258 | I915_WRITE(reg, pipe_stats[pipe]); |
986 | DRM_DEBUG_DRIVER("pipe b underrun\n"); | 1259 | irq_received = 1; |
987 | I915_WRITE(PIPEBSTAT, pipeb_stats); | 1260 | } |
988 | irq_received = 1; | ||
989 | } | 1261 | } |
990 | spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); | 1262 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
991 | 1263 | ||
992 | if (!irq_received) | 1264 | if (!irq_received) |
993 | break; | 1265 | break; |
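The per-pipe rewrite above leans on a for_each_pipe() iterator plus indexed PIPESTAT(pipe) lookups. A plausible sketch of those helpers, consistent with how this loop uses them (the real definitions live in the driver's headers; the register arithmetic is an assumption):

/* Hypothetical reconstructions for illustration only. */
#define I915_MAX_PIPES 2
#define for_each_pipe(p) for ((p) = 0; (p) < I915_MAX_PIPES; (p)++)
#define PIPESTAT(pipe) (0x70024 + (pipe) * 0x1000) /* A: 0x70024, B: 0x71024 */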
@@ -1019,18 +1291,10 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) | |||
1019 | READ_BREADCRUMB(dev_priv); | 1291 | READ_BREADCRUMB(dev_priv); |
1020 | } | 1292 | } |
1021 | 1293 | ||
1022 | if (iir & I915_USER_INTERRUPT) { | 1294 | if (iir & I915_USER_INTERRUPT) |
1023 | u32 seqno = | 1295 | notify_ring(dev, &dev_priv->ring[RCS]); |
1024 | render_ring->get_gem_seqno(dev, render_ring); | 1296 | if (iir & I915_BSD_USER_INTERRUPT) |
1025 | render_ring->irq_gem_seqno = seqno; | 1297 | notify_ring(dev, &dev_priv->ring[VCS]); |
1026 | trace_i915_gem_request_complete(dev, seqno); | ||
1027 | DRM_WAKEUP(&dev_priv->render_ring.irq_queue); | ||
1028 | dev_priv->hangcheck_count = 0; | ||
1029 | mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); | ||
1030 | } | ||
1031 | |||
1032 | if (HAS_BSD(dev) && (iir & I915_BSD_USER_INTERRUPT)) | ||
1033 | DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue); | ||
1034 | 1298 | ||
1035 | if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) { | 1299 | if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) { |
1036 | intel_prepare_page_flip(dev, 0); | 1300 | intel_prepare_page_flip(dev, 0); |
@@ -1044,28 +1308,23 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) | |||
1044 | intel_finish_page_flip_plane(dev, 1); | 1308 | intel_finish_page_flip_plane(dev, 1); |
1045 | } | 1309 | } |
1046 | 1310 | ||
1047 | if (pipea_stats & vblank_status) { | 1311 | for_each_pipe(pipe) { |
1048 | vblank++; | 1312 | if (pipe_stats[pipe] & vblank_status && |
1049 | drm_handle_vblank(dev, 0); | 1313 | drm_handle_vblank(dev, pipe)) { |
1050 | if (!dev_priv->flip_pending_is_done) { | 1314 | vblank++; |
1051 | i915_pageflip_stall_check(dev, 0); | 1315 | if (!dev_priv->flip_pending_is_done) { |
1052 | intel_finish_page_flip(dev, 0); | 1316 | i915_pageflip_stall_check(dev, pipe); |
1317 | intel_finish_page_flip(dev, pipe); | ||
1318 | } | ||
1053 | } | 1319 | } |
1054 | } | ||
1055 | 1320 | ||
1056 | if (pipeb_stats & vblank_status) { | 1321 | if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) |
1057 | vblank++; | 1322 | blc_event = true; |
1058 | drm_handle_vblank(dev, 1); | ||
1059 | if (!dev_priv->flip_pending_is_done) { | ||
1060 | i915_pageflip_stall_check(dev, 1); | ||
1061 | intel_finish_page_flip(dev, 1); | ||
1062 | } | ||
1063 | } | 1323 | } |
1064 | 1324 | ||
1065 | if ((pipea_stats & PIPE_LEGACY_BLC_EVENT_STATUS) || | 1325 | |
1066 | (pipeb_stats & PIPE_LEGACY_BLC_EVENT_STATUS) || | 1326 | if (blc_event || (iir & I915_ASLE_INTERRUPT)) |
1067 | (iir & I915_ASLE_INTERRUPT)) | 1327 | intel_opregion_asle_intr(dev); |
1068 | opregion_asle_intr(dev); | ||
1069 | 1328 | ||
1070 | /* With MSI, interrupts are only generated when iir | 1329 | /* With MSI, interrupts are only generated when iir |
1071 | * transitions from zero to nonzero. If another bit got | 1330 | * transitions from zero to nonzero. If another bit got |
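As that comment explains, MSI only fires on a 0-to-1 transition of IIR, so the handler must not return while unacknowledged bits remain: it snapshots IIR, clears it, services the snapshot, and loops on whatever was re-asserted in the meantime. The loop shape in miniature (read_iir/clear_iir/service are stand-ins, not driver functions):

#include <stdint.h>

extern uint32_t read_iir(void);
extern void clear_iir(uint32_t bits);	/* write-1-to-clear */
extern void service(uint32_t bits);

static void msi_safe_irq_loop(void)
{
	uint32_t iir = read_iir();

	while (iir) {
		uint32_t new_iir;

		clear_iir(iir);		/* ack before handling */
		new_iir = read_iir();	/* catch events raised meanwhile */
		service(iir);		/* act on the snapshot we acked */
		iir = new_iir;		/* zero means MSI can fire again */
	}
}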
@@ -1103,33 +1362,23 @@ static int i915_emit_irq(struct drm_device * dev) | |||
1103 | if (master_priv->sarea_priv) | 1362 | if (master_priv->sarea_priv) |
1104 | master_priv->sarea_priv->last_enqueue = dev_priv->counter; | 1363 | master_priv->sarea_priv->last_enqueue = dev_priv->counter; |
1105 | 1364 | ||
1106 | BEGIN_LP_RING(4); | 1365 | if (BEGIN_LP_RING(4) == 0) { |
1107 | OUT_RING(MI_STORE_DWORD_INDEX); | 1366 | OUT_RING(MI_STORE_DWORD_INDEX); |
1108 | OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); | 1367 | OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); |
1109 | OUT_RING(dev_priv->counter); | 1368 | OUT_RING(dev_priv->counter); |
1110 | OUT_RING(MI_USER_INTERRUPT); | 1369 | OUT_RING(MI_USER_INTERRUPT); |
1111 | ADVANCE_LP_RING(); | 1370 | ADVANCE_LP_RING(); |
1371 | } | ||
1112 | 1372 | ||
1113 | return dev_priv->counter; | 1373 | return dev_priv->counter; |
1114 | } | 1374 | } |
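The emit path stores the new counter into a fixed dword of the hardware status page via MI_STORE_DWORD_INDEX and then raises MI_USER_INTERRUPT; READ_BREADCRUMB is simply the CPU-side read of that dword. A hedged model of the read-back (the slot index and mapping here are placeholders):

#include <stdint.h>

#define BREADCRUMB_SLOT 5 /* placeholder index into the status page */

/* The GPU's MI_STORE_DWORD_INDEX write lands in one dword of the
 * coherent status page; the CPU just rereads that slot. */
static inline uint32_t read_breadcrumb(const volatile uint32_t *hws)
{
	return hws[BREADCRUMB_SLOT];
}

/* A wait then reduces to: emitted = i915_emit_irq(); block or poll
 * until read_breadcrumb(hws) >= emitted, as i915_wait_irq does. */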
1115 | 1375 | ||
1116 | void i915_trace_irq_get(struct drm_device *dev, u32 seqno) | ||
1117 | { | ||
1118 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | ||
1119 | struct intel_ring_buffer *render_ring = &dev_priv->render_ring; | ||
1120 | |||
1121 | if (dev_priv->trace_irq_seqno == 0) | ||
1122 | render_ring->user_irq_get(dev, render_ring); | ||
1123 | |||
1124 | dev_priv->trace_irq_seqno = seqno; | ||
1125 | } | ||
1126 | |||
1127 | static int i915_wait_irq(struct drm_device * dev, int irq_nr) | 1376 | static int i915_wait_irq(struct drm_device * dev, int irq_nr) |
1128 | { | 1377 | { |
1129 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 1378 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
1130 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; | 1379 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; |
1131 | int ret = 0; | 1380 | int ret = 0; |
1132 | struct intel_ring_buffer *render_ring = &dev_priv->render_ring; | 1381 | struct intel_ring_buffer *ring = LP_RING(dev_priv); |
1133 | 1382 | ||
1134 | DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr, | 1383 | DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr, |
1135 | READ_BREADCRUMB(dev_priv)); | 1384 | READ_BREADCRUMB(dev_priv)); |
@@ -1143,10 +1392,12 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr) | |||
1143 | if (master_priv->sarea_priv) | 1392 | if (master_priv->sarea_priv) |
1144 | master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; | 1393 | master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; |
1145 | 1394 | ||
1146 | render_ring->user_irq_get(dev, render_ring); | 1395 | if (ring->irq_get(ring)) { |
1147 | DRM_WAIT_ON(ret, dev_priv->render_ring.irq_queue, 3 * DRM_HZ, | 1396 | DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ, |
1148 | READ_BREADCRUMB(dev_priv) >= irq_nr); | 1397 | READ_BREADCRUMB(dev_priv) >= irq_nr); |
1149 | render_ring->user_irq_put(dev, render_ring); | 1398 | ring->irq_put(ring); |
1399 | } else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000)) | ||
1400 | ret = -EBUSY; | ||
1150 | 1401 | ||
1151 | if (ret == -EBUSY) { | 1402 | if (ret == -EBUSY) { |
1152 | DRM_ERROR("EBUSY -- rec: %d emitted: %d\n", | 1403 | DRM_ERROR("EBUSY -- rec: %d emitted: %d\n", |
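The rewritten wait only sleeps on the ring's wait queue when ring->irq_get() succeeds, i.e. when the user interrupt could actually be enabled; otherwise it degrades to polling the breadcrumb with the same 3-second budget. A sketch of that guarded-wait shape, with all helpers as stand-ins:

#include <errno.h>
#include <stdbool.h>

extern bool irq_get(void);	/* reference + enable the user irq */
extern void irq_put(void);	/* drop the reference */
extern bool sleep_until(bool (*cond)(void), int timeout_ms);
extern bool poll_until(bool (*cond)(void), int timeout_ms);

static int wait_breadcrumb(bool (*done)(void))
{
	if (irq_get()) {
		bool ok = sleep_until(done, 3000); /* interrupt-driven */
		irq_put();
		return ok ? 0 : -EBUSY;
	}
	/* No interrupt available: bounded busy-wait instead of hanging. */
	return poll_until(done, 3000) ? 0 : -EBUSY;
}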
@@ -1165,7 +1416,7 @@ int i915_irq_emit(struct drm_device *dev, void *data, | |||
1165 | drm_i915_irq_emit_t *emit = data; | 1416 | drm_i915_irq_emit_t *emit = data; |
1166 | int result; | 1417 | int result; |
1167 | 1418 | ||
1168 | if (!dev_priv || !dev_priv->render_ring.virtual_start) { | 1419 | if (!dev_priv || !LP_RING(dev_priv)->virtual_start) { |
1169 | DRM_ERROR("called with no initialization\n"); | 1420 | DRM_ERROR("called with no initialization\n"); |
1170 | return -EINVAL; | 1421 | return -EINVAL; |
1171 | } | 1422 | } |
@@ -1203,59 +1454,102 @@ int i915_irq_wait(struct drm_device *dev, void *data, | |||
1203 | /* Called from drm generic code, passed 'crtc' which | 1454 | /* Called from drm generic code, passed 'crtc' which |
1204 | * we use as a pipe index | 1455 | * we use as a pipe index |
1205 | */ | 1456 | */ |
1206 | int i915_enable_vblank(struct drm_device *dev, int pipe) | 1457 | static int i915_enable_vblank(struct drm_device *dev, int pipe) |
1207 | { | 1458 | { |
1208 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 1459 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
1209 | unsigned long irqflags; | 1460 | unsigned long irqflags; |
1210 | int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; | ||
1211 | u32 pipeconf; | ||
1212 | 1461 | ||
1213 | pipeconf = I915_READ(pipeconf_reg); | 1462 | if (!i915_pipe_enabled(dev, pipe)) |
1214 | if (!(pipeconf & PIPEACONF_ENABLE)) | ||
1215 | return -EINVAL; | 1463 | return -EINVAL; |
1216 | 1464 | ||
1217 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); | 1465 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
1218 | if (HAS_PCH_SPLIT(dev)) | 1466 | if (INTEL_INFO(dev)->gen >= 4) |
1219 | ironlake_enable_display_irq(dev_priv, (pipe == 0) ? | ||
1220 | DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); | ||
1221 | else if (IS_I965G(dev)) | ||
1222 | i915_enable_pipestat(dev_priv, pipe, | 1467 | i915_enable_pipestat(dev_priv, pipe, |
1223 | PIPE_START_VBLANK_INTERRUPT_ENABLE); | 1468 | PIPE_START_VBLANK_INTERRUPT_ENABLE); |
1224 | else | 1469 | else |
1225 | i915_enable_pipestat(dev_priv, pipe, | 1470 | i915_enable_pipestat(dev_priv, pipe, |
1226 | PIPE_VBLANK_INTERRUPT_ENABLE); | 1471 | PIPE_VBLANK_INTERRUPT_ENABLE); |
1227 | spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); | 1472 | |
1473 | /* maintain vblank delivery even in deep C-states */ | ||
1474 | if (dev_priv->info->gen == 3) | ||
1475 | I915_WRITE(INSTPM, INSTPM_AGPBUSY_DIS << 16); | ||
1476 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | ||
1477 | |||
1478 | return 0; | ||
1479 | } | ||
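INSTPM belongs to the family of masked registers where the high 16 bits select which low bits a write may touch. Writing INSTPM_AGPBUSY_DIS << 16 (mask set, value bit clear) therefore clears the bit here, keeping vblank delivery alive, while the disable path below writes bit << 16 | bit to set it again. Generic helpers for that convention might look like:

#include <stdint.h>

/* Masked-register write values, assuming bits [31:16] are a
 * write-enable mask for bits [15:0] (the i915 convention). */
static inline uint32_t masked_bit_set(uint32_t bit)
{
	return (bit << 16) | bit;	/* touch 'bit' and set it */
}

static inline uint32_t masked_bit_clear(uint32_t bit)
{
	return bit << 16;		/* touch 'bit' and clear it */
}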
1480 | |||
1481 | static int ironlake_enable_vblank(struct drm_device *dev, int pipe) | ||
1482 | { | ||
1483 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | ||
1484 | unsigned long irqflags; | ||
1485 | |||
1486 | if (!i915_pipe_enabled(dev, pipe)) | ||
1487 | return -EINVAL; | ||
1488 | |||
1489 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | ||
1490 | ironlake_enable_display_irq(dev_priv, (pipe == 0) ? | ||
1491 | DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); | ||
1492 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | ||
1493 | |||
1494 | return 0; | ||
1495 | } | ||
1496 | |||
1497 | static int ivybridge_enable_vblank(struct drm_device *dev, int pipe) | ||
1498 | { | ||
1499 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | ||
1500 | unsigned long irqflags; | ||
1501 | |||
1502 | if (!i915_pipe_enabled(dev, pipe)) | ||
1503 | return -EINVAL; | ||
1504 | |||
1505 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | ||
1506 | ironlake_enable_display_irq(dev_priv, (pipe == 0) ? | ||
1507 | DE_PIPEA_VBLANK_IVB : DE_PIPEB_VBLANK_IVB); | ||
1508 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | ||
1509 | |||
1228 | return 0; | 1510 | return 0; |
1229 | } | 1511 | } |
1230 | 1512 | ||
1231 | /* Called from drm generic code, passed 'crtc' which | 1513 | /* Called from drm generic code, passed 'crtc' which |
1232 | * we use as a pipe index | 1514 | * we use as a pipe index |
1233 | */ | 1515 | */ |
1234 | void i915_disable_vblank(struct drm_device *dev, int pipe) | 1516 | static void i915_disable_vblank(struct drm_device *dev, int pipe) |
1235 | { | 1517 | { |
1236 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 1518 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
1237 | unsigned long irqflags; | 1519 | unsigned long irqflags; |
1238 | 1520 | ||
1239 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); | 1521 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
1240 | if (HAS_PCH_SPLIT(dev)) | 1522 | if (dev_priv->info->gen == 3) |
1241 | ironlake_disable_display_irq(dev_priv, (pipe == 0) ? | 1523 | I915_WRITE(INSTPM, |
1242 | DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); | 1524 | INSTPM_AGPBUSY_DIS << 16 | INSTPM_AGPBUSY_DIS); |
1243 | else | 1525 | |
1244 | i915_disable_pipestat(dev_priv, pipe, | 1526 | i915_disable_pipestat(dev_priv, pipe, |
1245 | PIPE_VBLANK_INTERRUPT_ENABLE | | 1527 | PIPE_VBLANK_INTERRUPT_ENABLE | |
1246 | PIPE_START_VBLANK_INTERRUPT_ENABLE); | 1528 | PIPE_START_VBLANK_INTERRUPT_ENABLE); |
1247 | spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); | 1529 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
1248 | } | 1530 | } |
1249 | 1531 | ||
1250 | void i915_enable_interrupt (struct drm_device *dev) | 1532 | static void ironlake_disable_vblank(struct drm_device *dev, int pipe) |
1251 | { | 1533 | { |
1252 | struct drm_i915_private *dev_priv = dev->dev_private; | 1534 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
1535 | unsigned long irqflags; | ||
1253 | 1536 | ||
1254 | if (!HAS_PCH_SPLIT(dev)) | 1537 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
1255 | opregion_enable_asle(dev); | 1538 | ironlake_disable_display_irq(dev_priv, (pipe == 0) ? |
1256 | dev_priv->irq_enabled = 1; | 1539 | DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); |
1540 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | ||
1257 | } | 1541 | } |
1258 | 1542 | ||
1543 | static void ivybridge_disable_vblank(struct drm_device *dev, int pipe) | ||
1544 | { | ||
1545 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | ||
1546 | unsigned long irqflags; | ||
1547 | |||
1548 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | ||
1549 | ironlake_disable_display_irq(dev_priv, (pipe == 0) ? | ||
1550 | DE_PIPEA_VBLANK_IVB : DE_PIPEB_VBLANK_IVB); | ||
1551 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | ||
1552 | } | ||
1259 | 1553 | ||
1260 | /* Set the vblank monitor pipe | 1554 | /* Set the vblank monitor pipe |
1261 | */ | 1555 | */ |
@@ -1311,12 +1605,50 @@ int i915_vblank_swap(struct drm_device *dev, void *data, | |||
1311 | return -EINVAL; | 1605 | return -EINVAL; |
1312 | } | 1606 | } |
1313 | 1607 | ||
1314 | struct drm_i915_gem_request * | 1608 | static u32 |
1315 | i915_get_tail_request(struct drm_device *dev) | 1609 | ring_last_seqno(struct intel_ring_buffer *ring) |
1316 | { | 1610 | { |
1317 | drm_i915_private_t *dev_priv = dev->dev_private; | 1611 | return list_entry(ring->request_list.prev, |
1318 | return list_entry(dev_priv->render_ring.request_list.prev, | 1612 | struct drm_i915_gem_request, list)->seqno; |
1319 | struct drm_i915_gem_request, list); | 1613 | } |
1614 | |||
1615 | static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err) | ||
1616 | { | ||
1617 | if (list_empty(&ring->request_list) || | ||
1618 | i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) { | ||
1619 | /* Issue a wake-up to catch stuck h/w. */ | ||
1620 | if (ring->waiting_seqno && waitqueue_active(&ring->irq_queue)) { | ||
1621 | DRM_ERROR("Hangcheck timer elapsed... %s idle [waiting on %d, at %d], missed IRQ?\n", | ||
1622 | ring->name, | ||
1623 | ring->waiting_seqno, | ||
1624 | ring->get_seqno(ring)); | ||
1625 | wake_up_all(&ring->irq_queue); | ||
1626 | *err = true; | ||
1627 | } | ||
1628 | return true; | ||
1629 | } | ||
1630 | return false; | ||
1631 | } | ||
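i915_hangcheck_ring_idle calls a ring idle once the last queued request's seqno has been passed. Seqnos are 32-bit counters that wrap, so the comparison is done via a signed difference; a self-contained sketch consistent with how the driver compares seqnos:

#include <stdbool.h>
#include <stdint.h>

/* True if seq1 is at or beyond seq2, tolerating 32-bit wraparound:
 * the signed difference stays non-negative while the two values are
 * within 2^31 of each other, so 0x00000001 correctly "passes"
 * 0xffffffff after a wrap. */
static inline bool seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}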
1632 | |||
1633 | static bool kick_ring(struct intel_ring_buffer *ring) | ||
1634 | { | ||
1635 | struct drm_device *dev = ring->dev; | ||
1636 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1637 | u32 tmp = I915_READ_CTL(ring); | ||
1638 | if (tmp & RING_WAIT) { | ||
1639 | DRM_ERROR("Kicking stuck wait on %s\n", | ||
1640 | ring->name); | ||
1641 | I915_WRITE_CTL(ring, tmp); | ||
1642 | return true; | ||
1643 | } | ||
1644 | if (IS_GEN6(dev) && | ||
1645 | (tmp & RING_WAIT_SEMAPHORE)) { | ||
1646 | DRM_ERROR("Kicking stuck semaphore on %s\n", | ||
1647 | ring->name); | ||
1648 | I915_WRITE_CTL(ring, tmp); | ||
1649 | return true; | ||
1650 | } | ||
1651 | return false; | ||
1320 | } | 1652 | } |
1321 | 1653 | ||
1322 | /** | 1654 | /** |
@@ -1330,12 +1662,19 @@ void i915_hangcheck_elapsed(unsigned long data) | |||
1330 | struct drm_device *dev = (struct drm_device *)data; | 1662 | struct drm_device *dev = (struct drm_device *)data; |
1331 | drm_i915_private_t *dev_priv = dev->dev_private; | 1663 | drm_i915_private_t *dev_priv = dev->dev_private; |
1332 | uint32_t acthd, instdone, instdone1; | 1664 | uint32_t acthd, instdone, instdone1; |
1665 | bool err = false; | ||
1333 | 1666 | ||
1334 | /* No reset support on this chip yet. */ | 1667 | /* If all work is done then ACTHD clearly hasn't advanced. */ |
1335 | if (IS_GEN6(dev)) | 1668 | if (i915_hangcheck_ring_idle(&dev_priv->ring[RCS], &err) && |
1669 | i915_hangcheck_ring_idle(&dev_priv->ring[VCS], &err) && | ||
1670 | i915_hangcheck_ring_idle(&dev_priv->ring[BCS], &err)) { | ||
1671 | dev_priv->hangcheck_count = 0; | ||
1672 | if (err) | ||
1673 | goto repeat; | ||
1336 | return; | 1674 | return; |
1675 | } | ||
1337 | 1676 | ||
1338 | if (!IS_I965G(dev)) { | 1677 | if (INTEL_INFO(dev)->gen < 4) { |
1339 | acthd = I915_READ(ACTHD); | 1678 | acthd = I915_READ(ACTHD); |
1340 | instdone = I915_READ(INSTDONE); | 1679 | instdone = I915_READ(INSTDONE); |
1341 | instdone1 = 0; | 1680 | instdone1 = 0; |
@@ -1345,38 +1684,31 @@ void i915_hangcheck_elapsed(unsigned long data) | |||
1345 | instdone1 = I915_READ(INSTDONE1); | 1684 | instdone1 = I915_READ(INSTDONE1); |
1346 | } | 1685 | } |
1347 | 1686 | ||
1348 | /* If all work is done then ACTHD clearly hasn't advanced. */ | ||
1349 | if (list_empty(&dev_priv->render_ring.request_list) || | ||
1350 | i915_seqno_passed(i915_get_gem_seqno(dev, | ||
1351 | &dev_priv->render_ring), | ||
1352 | i915_get_tail_request(dev)->seqno)) { | ||
1353 | bool missed_wakeup = false; | ||
1354 | |||
1355 | dev_priv->hangcheck_count = 0; | ||
1356 | |||
1357 | /* Issue a wake-up to catch stuck h/w. */ | ||
1358 | if (dev_priv->render_ring.waiting_gem_seqno && | ||
1359 | waitqueue_active(&dev_priv->render_ring.irq_queue)) { | ||
1360 | DRM_WAKEUP(&dev_priv->render_ring.irq_queue); | ||
1361 | missed_wakeup = true; | ||
1362 | } | ||
1363 | |||
1364 | if (dev_priv->bsd_ring.waiting_gem_seqno && | ||
1365 | waitqueue_active(&dev_priv->bsd_ring.irq_queue)) { | ||
1366 | DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue); | ||
1367 | missed_wakeup = true; | ||
1368 | } | ||
1369 | |||
1370 | if (missed_wakeup) | ||
1371 | DRM_ERROR("Hangcheck timer elapsed... GPU idle, missed IRQ.\n"); | ||
1372 | return; | ||
1373 | } | ||
1374 | |||
1375 | if (dev_priv->last_acthd == acthd && | 1687 | if (dev_priv->last_acthd == acthd && |
1376 | dev_priv->last_instdone == instdone && | 1688 | dev_priv->last_instdone == instdone && |
1377 | dev_priv->last_instdone1 == instdone1) { | 1689 | dev_priv->last_instdone1 == instdone1) { |
1378 | if (dev_priv->hangcheck_count++ > 1) { | 1690 | if (dev_priv->hangcheck_count++ > 1) { |
1379 | DRM_ERROR("Hangcheck timer elapsed... GPU hung\n"); | 1691 | DRM_ERROR("Hangcheck timer elapsed... GPU hung\n"); |
1692 | |||
1693 | if (!IS_GEN2(dev)) { | ||
1694 | /* Is the chip hanging on a WAIT_FOR_EVENT? | ||
1695 | * If so, we can simply poke the RB_WAIT bit | ||
1696 | * and break the hang. This should work on | ||
1697 | * all but the second generation chipsets. | ||
1698 | */ | ||
1699 | |||
1700 | if (kick_ring(&dev_priv->ring[RCS])) | ||
1701 | goto repeat; | ||
1702 | |||
1703 | if (HAS_BSD(dev) && | ||
1704 | kick_ring(&dev_priv->ring[VCS])) | ||
1705 | goto repeat; | ||
1706 | |||
1707 | if (HAS_BLT(dev) && | ||
1708 | kick_ring(&dev_priv->ring[BCS])) | ||
1709 | goto repeat; | ||
1710 | } | ||
1711 | |||
1380 | i915_handle_error(dev, true); | 1712 | i915_handle_error(dev, true); |
1381 | return; | 1713 | return; |
1382 | } | 1714 | } |
@@ -1388,8 +1720,10 @@ void i915_hangcheck_elapsed(unsigned long data) | |||
1388 | dev_priv->last_instdone1 = instdone1; | 1720 | dev_priv->last_instdone1 = instdone1; |
1389 | } | 1721 | } |
1390 | 1722 | ||
1723 | repeat: | ||
1391 | /* Reset timer in case chip hangs without another request being added */ | 1724 | /* Reset timer in case chip hangs without another request being added */ |
1392 | mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); | 1725 | mod_timer(&dev_priv->hangcheck_timer, |
1726 | jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); | ||
1393 | } | 1727 | } |
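With the repeat: label, the hangcheck timer re-arms itself whenever it kicked a ring or found work still pending, and the period is now given in milliseconds and converted with msecs_to_jiffies() rather than used as a raw jiffies count. A sketch of such a self-rescheduling watchdog using the classic timer API of this era (the period constant is an assumption for illustration):

#include <linux/timer.h>
#include <linux/jiffies.h>

#define HANGCHECK_PERIOD_MS 1500 /* assumed, for illustration */

static struct timer_list watchdog;

static void watchdog_elapsed(unsigned long data)
{
	/* ... sample ACTHD/INSTDONE, kick stuck rings, or give up ... */

	/* Re-arm so a silent hang is still caught one period later. */
	mod_timer(&watchdog,
		  jiffies + msecs_to_jiffies(HANGCHECK_PERIOD_MS));
}

static void watchdog_start(void)
{
	setup_timer(&watchdog, watchdog_elapsed, 0);
	mod_timer(&watchdog,
		  jiffies + msecs_to_jiffies(HANGCHECK_PERIOD_MS));
}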
1394 | 1728 | ||
1395 | /* drm_dma.h hooks | 1729 | /* drm_dma.h hooks |
@@ -1398,23 +1732,41 @@ static void ironlake_irq_preinstall(struct drm_device *dev) | |||
1398 | { | 1732 | { |
1399 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 1733 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
1400 | 1734 | ||
1735 | atomic_set(&dev_priv->irq_received, 0); | ||
1736 | |||
1737 | INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); | ||
1738 | INIT_WORK(&dev_priv->error_work, i915_error_work_func); | ||
1739 | if (IS_GEN6(dev) || IS_IVYBRIDGE(dev)) | ||
1740 | INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work); | ||
1741 | |||
1401 | I915_WRITE(HWSTAM, 0xeffe); | 1742 | I915_WRITE(HWSTAM, 0xeffe); |
1743 | if (IS_GEN6(dev) || IS_GEN7(dev)) { | ||
1744 | /* Workaround stalls observed on Sandy Bridge GPUs by | ||
1745 | * making the blitter command streamer generate a | ||
1746 | * write to the Hardware Status Page for | ||
1747 | * MI_USER_INTERRUPT. This appears to serialize the | ||
1748 | * previous seqno write out before the interrupt | ||
1749 | * happens. | ||
1750 | */ | ||
1751 | I915_WRITE(GEN6_BLITTER_HWSTAM, ~GEN6_BLITTER_USER_INTERRUPT); | ||
1752 | I915_WRITE(GEN6_BSD_HWSTAM, ~GEN6_BSD_USER_INTERRUPT); | ||
1753 | } | ||
1402 | 1754 | ||
1403 | /* XXX hotplug from PCH */ | 1755 | /* XXX hotplug from PCH */ |
1404 | 1756 | ||
1405 | I915_WRITE(DEIMR, 0xffffffff); | 1757 | I915_WRITE(DEIMR, 0xffffffff); |
1406 | I915_WRITE(DEIER, 0x0); | 1758 | I915_WRITE(DEIER, 0x0); |
1407 | (void) I915_READ(DEIER); | 1759 | POSTING_READ(DEIER); |
1408 | 1760 | ||
1409 | /* and GT */ | 1761 | /* and GT */ |
1410 | I915_WRITE(GTIMR, 0xffffffff); | 1762 | I915_WRITE(GTIMR, 0xffffffff); |
1411 | I915_WRITE(GTIER, 0x0); | 1763 | I915_WRITE(GTIER, 0x0); |
1412 | (void) I915_READ(GTIER); | 1764 | POSTING_READ(GTIER); |
1413 | 1765 | ||
1414 | /* south display irq */ | 1766 | /* south display irq */ |
1415 | I915_WRITE(SDEIMR, 0xffffffff); | 1767 | I915_WRITE(SDEIMR, 0xffffffff); |
1416 | I915_WRITE(SDEIER, 0x0); | 1768 | I915_WRITE(SDEIER, 0x0); |
1417 | (void) I915_READ(SDEIER); | 1769 | POSTING_READ(SDEIER); |
1418 | } | 1770 | } |
1419 | 1771 | ||
1420 | static int ironlake_irq_postinstall(struct drm_device *dev) | 1772 | static int ironlake_irq_postinstall(struct drm_device *dev) |
@@ -1423,40 +1775,61 @@ static int ironlake_irq_postinstall(struct drm_device *dev) | |||
1423 | /* enable the kinds of interrupts that are always enabled */ | 1775 | /* enable the kinds of interrupts that are always enabled */ |
1424 | u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | | 1776 | u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | |
1425 | DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE; | 1777 | DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE; |
1426 | u32 render_mask = GT_PIPE_NOTIFY | GT_BSD_USER_INTERRUPT; | 1778 | u32 render_irqs; |
1427 | u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | | 1779 | u32 hotplug_mask; |
1428 | SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; | 1780 | |
1781 | DRM_INIT_WAITQUEUE(&dev_priv->ring[RCS].irq_queue); | ||
1782 | if (HAS_BSD(dev)) | ||
1783 | DRM_INIT_WAITQUEUE(&dev_priv->ring[VCS].irq_queue); | ||
1784 | if (HAS_BLT(dev)) | ||
1785 | DRM_INIT_WAITQUEUE(&dev_priv->ring[BCS].irq_queue); | ||
1429 | 1786 | ||
1430 | dev_priv->irq_mask_reg = ~display_mask; | 1787 | dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; |
1431 | dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK; | 1788 | dev_priv->irq_mask = ~display_mask; |
1432 | 1789 | ||
1433 | /* should always can generate irq */ | 1790 | /* should always can generate irq */ |
1434 | I915_WRITE(DEIIR, I915_READ(DEIIR)); | 1791 | I915_WRITE(DEIIR, I915_READ(DEIIR)); |
1435 | I915_WRITE(DEIMR, dev_priv->irq_mask_reg); | 1792 | I915_WRITE(DEIMR, dev_priv->irq_mask); |
1436 | I915_WRITE(DEIER, dev_priv->de_irq_enable_reg); | 1793 | I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK); |
1437 | (void) I915_READ(DEIER); | 1794 | POSTING_READ(DEIER); |
1438 | |||
1439 | /* Gen6 only needs render pipe_control now */ | ||
1440 | if (IS_GEN6(dev)) | ||
1441 | render_mask = GT_PIPE_NOTIFY; | ||
1442 | 1795 | ||
1443 | dev_priv->gt_irq_mask_reg = ~render_mask; | 1796 | dev_priv->gt_irq_mask = ~0; |
1444 | dev_priv->gt_irq_enable_reg = render_mask; | ||
1445 | 1797 | ||
1446 | I915_WRITE(GTIIR, I915_READ(GTIIR)); | 1798 | I915_WRITE(GTIIR, I915_READ(GTIIR)); |
1447 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg); | 1799 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask); |
1800 | |||
1448 | if (IS_GEN6(dev)) | 1801 | if (IS_GEN6(dev)) |
1449 | I915_WRITE(GEN6_RENDER_IMR, ~GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT); | 1802 | render_irqs = |
1450 | I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg); | 1803 | GT_USER_INTERRUPT | |
1451 | (void) I915_READ(GTIER); | 1804 | GT_GEN6_BSD_USER_INTERRUPT | |
1805 | GT_BLT_USER_INTERRUPT; | ||
1806 | else | ||
1807 | render_irqs = | ||
1808 | GT_USER_INTERRUPT | | ||
1809 | GT_PIPE_NOTIFY | | ||
1810 | GT_BSD_USER_INTERRUPT; | ||
1811 | I915_WRITE(GTIER, render_irqs); | ||
1812 | POSTING_READ(GTIER); | ||
1813 | |||
1814 | if (HAS_PCH_CPT(dev)) { | ||
1815 | hotplug_mask = (SDE_CRT_HOTPLUG_CPT | | ||
1816 | SDE_PORTB_HOTPLUG_CPT | | ||
1817 | SDE_PORTC_HOTPLUG_CPT | | ||
1818 | SDE_PORTD_HOTPLUG_CPT); | ||
1819 | } else { | ||
1820 | hotplug_mask = (SDE_CRT_HOTPLUG | | ||
1821 | SDE_PORTB_HOTPLUG | | ||
1822 | SDE_PORTC_HOTPLUG | | ||
1823 | SDE_PORTD_HOTPLUG | | ||
1824 | SDE_AUX_MASK); | ||
1825 | } | ||
1452 | 1826 | ||
1453 | dev_priv->pch_irq_mask_reg = ~hotplug_mask; | 1827 | dev_priv->pch_irq_mask = ~hotplug_mask; |
1454 | dev_priv->pch_irq_enable_reg = hotplug_mask; | ||
1455 | 1828 | ||
1456 | I915_WRITE(SDEIIR, I915_READ(SDEIIR)); | 1829 | I915_WRITE(SDEIIR, I915_READ(SDEIIR)); |
1457 | I915_WRITE(SDEIMR, dev_priv->pch_irq_mask_reg); | 1830 | I915_WRITE(SDEIMR, dev_priv->pch_irq_mask); |
1458 | I915_WRITE(SDEIER, dev_priv->pch_irq_enable_reg); | 1831 | I915_WRITE(SDEIER, hotplug_mask); |
1459 | (void) I915_READ(SDEIER); | 1832 | POSTING_READ(SDEIER); |
1460 | 1833 | ||
1461 | if (IS_IRONLAKE_M(dev)) { | 1834 | if (IS_IRONLAKE_M(dev)) { |
1462 | /* Clear & enable PCU event interrupts */ | 1835 | /* Clear & enable PCU event interrupts */ |
@@ -1468,55 +1841,93 @@ static int ironlake_irq_postinstall(struct drm_device *dev) | |||
1468 | return 0; | 1841 | return 0; |
1469 | } | 1842 | } |
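Each interrupt block above (DE, GT, SDE) is brought up with the same four-step idiom: ack stale bits by writing IIR back to itself, program the mask in IMR, enable the wanted sources in IER, then issue a posting read so the MMIO writes are flushed before interrupts can fire. Factored out with placeholder accessors:

#include <stdint.h>

extern uint32_t mmio_read(uint32_t reg);
extern void mmio_write(uint32_t reg, uint32_t val);

static void irq_block_setup(uint32_t iir, uint32_t imr, uint32_t ier,
			    uint32_t mask, uint32_t enable)
{
	mmio_write(iir, mmio_read(iir)); /* clear stale pending bits */
	mmio_write(imr, mask);           /* unmask handled sources */
	mmio_write(ier, enable);         /* allow them to assert */
	(void)mmio_read(ier);            /* posting read flushes writes */
}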
1470 | 1843 | ||
1471 | void i915_driver_irq_preinstall(struct drm_device * dev) | 1844 | static int ivybridge_irq_postinstall(struct drm_device *dev) |
1845 | { | ||
1846 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | ||
1847 | /* enable the kinds of interrupts that are always enabled */ | ||
1848 | u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | | ||
1849 | DE_PCH_EVENT_IVB | DE_PLANEA_FLIP_DONE_IVB | | ||
1850 | DE_PLANEB_FLIP_DONE_IVB; | ||
1851 | u32 render_irqs; | ||
1852 | u32 hotplug_mask; | ||
1853 | |||
1854 | DRM_INIT_WAITQUEUE(&dev_priv->ring[RCS].irq_queue); | ||
1855 | if (HAS_BSD(dev)) | ||
1856 | DRM_INIT_WAITQUEUE(&dev_priv->ring[VCS].irq_queue); | ||
1857 | if (HAS_BLT(dev)) | ||
1858 | DRM_INIT_WAITQUEUE(&dev_priv->ring[BCS].irq_queue); | ||
1859 | |||
1860 | dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; | ||
1861 | dev_priv->irq_mask = ~display_mask; | ||
1862 | |||
1863 | /* should always be able to generate an irq */ | ||
1864 | I915_WRITE(DEIIR, I915_READ(DEIIR)); | ||
1865 | I915_WRITE(DEIMR, dev_priv->irq_mask); | ||
1866 | I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK_IVB | | ||
1867 | DE_PIPEB_VBLANK_IVB); | ||
1868 | POSTING_READ(DEIER); | ||
1869 | |||
1870 | dev_priv->gt_irq_mask = ~0; | ||
1871 | |||
1872 | I915_WRITE(GTIIR, I915_READ(GTIIR)); | ||
1873 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask); | ||
1874 | |||
1875 | render_irqs = GT_USER_INTERRUPT | GT_GEN6_BSD_USER_INTERRUPT | | ||
1876 | GT_BLT_USER_INTERRUPT; | ||
1877 | I915_WRITE(GTIER, render_irqs); | ||
1878 | POSTING_READ(GTIER); | ||
1879 | |||
1880 | hotplug_mask = (SDE_CRT_HOTPLUG_CPT | | ||
1881 | SDE_PORTB_HOTPLUG_CPT | | ||
1882 | SDE_PORTC_HOTPLUG_CPT | | ||
1883 | SDE_PORTD_HOTPLUG_CPT); | ||
1884 | dev_priv->pch_irq_mask = ~hotplug_mask; | ||
1885 | |||
1886 | I915_WRITE(SDEIIR, I915_READ(SDEIIR)); | ||
1887 | I915_WRITE(SDEIMR, dev_priv->pch_irq_mask); | ||
1888 | I915_WRITE(SDEIER, hotplug_mask); | ||
1889 | POSTING_READ(SDEIER); | ||
1890 | |||
1891 | return 0; | ||
1892 | } | ||
1893 | |||
1894 | static void i915_driver_irq_preinstall(struct drm_device * dev) | ||
1472 | { | 1895 | { |
1473 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 1896 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
1897 | int pipe; | ||
1474 | 1898 | ||
1475 | atomic_set(&dev_priv->irq_received, 0); | 1899 | atomic_set(&dev_priv->irq_received, 0); |
1476 | 1900 | ||
1477 | INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); | 1901 | INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); |
1478 | INIT_WORK(&dev_priv->error_work, i915_error_work_func); | 1902 | INIT_WORK(&dev_priv->error_work, i915_error_work_func); |
1479 | 1903 | ||
1480 | if (HAS_PCH_SPLIT(dev)) { | ||
1481 | ironlake_irq_preinstall(dev); | ||
1482 | return; | ||
1483 | } | ||
1484 | |||
1485 | if (I915_HAS_HOTPLUG(dev)) { | 1904 | if (I915_HAS_HOTPLUG(dev)) { |
1486 | I915_WRITE(PORT_HOTPLUG_EN, 0); | 1905 | I915_WRITE(PORT_HOTPLUG_EN, 0); |
1487 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); | 1906 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); |
1488 | } | 1907 | } |
1489 | 1908 | ||
1490 | I915_WRITE(HWSTAM, 0xeffe); | 1909 | I915_WRITE(HWSTAM, 0xeffe); |
1491 | I915_WRITE(PIPEASTAT, 0); | 1910 | for_each_pipe(pipe) |
1492 | I915_WRITE(PIPEBSTAT, 0); | 1911 | I915_WRITE(PIPESTAT(pipe), 0); |
1493 | I915_WRITE(IMR, 0xffffffff); | 1912 | I915_WRITE(IMR, 0xffffffff); |
1494 | I915_WRITE(IER, 0x0); | 1913 | I915_WRITE(IER, 0x0); |
1495 | (void) I915_READ(IER); | 1914 | POSTING_READ(IER); |
1496 | } | 1915 | } |
1497 | 1916 | ||
1498 | /* | 1917 | /* |
1499 | * Must be called after intel_modeset_init or hotplug interrupts won't be | 1918 | * Must be called after intel_modeset_init or hotplug interrupts won't be |
1500 | * enabled correctly. | 1919 | * enabled correctly. |
1501 | */ | 1920 | */ |
1502 | int i915_driver_irq_postinstall(struct drm_device *dev) | 1921 | static int i915_driver_irq_postinstall(struct drm_device *dev) |
1503 | { | 1922 | { |
1504 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 1923 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
1505 | u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR; | 1924 | u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR; |
1506 | u32 error_mask; | 1925 | u32 error_mask; |
1507 | 1926 | ||
1508 | DRM_INIT_WAITQUEUE(&dev_priv->render_ring.irq_queue); | ||
1509 | |||
1510 | if (HAS_BSD(dev)) | ||
1511 | DRM_INIT_WAITQUEUE(&dev_priv->bsd_ring.irq_queue); | ||
1512 | |||
1513 | dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; | 1927 | dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; |
1514 | 1928 | ||
1515 | if (HAS_PCH_SPLIT(dev)) | ||
1516 | return ironlake_irq_postinstall(dev); | ||
1517 | |||
1518 | /* Unmask the interrupts that we always want on. */ | 1929 | /* Unmask the interrupts that we always want on. */ |
1519 | dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX; | 1930 | dev_priv->irq_mask = ~I915_INTERRUPT_ENABLE_FIX; |
1520 | 1931 | ||
1521 | dev_priv->pipestat[0] = 0; | 1932 | dev_priv->pipestat[0] = 0; |
1522 | dev_priv->pipestat[1] = 0; | 1933 | dev_priv->pipestat[1] = 0; |
@@ -1525,7 +1936,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev) | |||
1525 | /* Enable in IER... */ | 1936 | /* Enable in IER... */ |
1526 | enable_mask |= I915_DISPLAY_PORT_INTERRUPT; | 1937 | enable_mask |= I915_DISPLAY_PORT_INTERRUPT; |
1527 | /* and unmask in IMR */ | 1938 | /* and unmask in IMR */ |
1528 | dev_priv->irq_mask_reg &= ~I915_DISPLAY_PORT_INTERRUPT; | 1939 | dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; |
1529 | } | 1940 | } |
1530 | 1941 | ||
1531 | /* | 1942 | /* |
@@ -1543,9 +1954,9 @@ int i915_driver_irq_postinstall(struct drm_device *dev) | |||
1543 | } | 1954 | } |
1544 | I915_WRITE(EMR, error_mask); | 1955 | I915_WRITE(EMR, error_mask); |
1545 | 1956 | ||
1546 | I915_WRITE(IMR, dev_priv->irq_mask_reg); | 1957 | I915_WRITE(IMR, dev_priv->irq_mask); |
1547 | I915_WRITE(IER, enable_mask); | 1958 | I915_WRITE(IER, enable_mask); |
1548 | (void) I915_READ(IER); | 1959 | POSTING_READ(IER); |
1549 | 1960 | ||
1550 | if (I915_HAS_HOTPLUG(dev)) { | 1961 | if (I915_HAS_HOTPLUG(dev)) { |
1551 | u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); | 1962 | u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); |
@@ -1578,7 +1989,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev) | |||
1578 | I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); | 1989 | I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); |
1579 | } | 1990 | } |
1580 | 1991 | ||
1581 | opregion_enable_asle(dev); | 1992 | intel_opregion_enable_asle(dev); |
1582 | 1993 | ||
1583 | return 0; | 1994 | return 0; |
1584 | } | 1995 | } |
@@ -1586,6 +1997,12 @@ int i915_driver_irq_postinstall(struct drm_device *dev) | |||
1586 | static void ironlake_irq_uninstall(struct drm_device *dev) | 1997 | static void ironlake_irq_uninstall(struct drm_device *dev) |
1587 | { | 1998 | { |
1588 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 1999 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
2000 | |||
2001 | if (!dev_priv) | ||
2002 | return; | ||
2003 | |||
2004 | dev_priv->vblank_pipe = 0; | ||
2005 | |||
1589 | I915_WRITE(HWSTAM, 0xffffffff); | 2006 | I915_WRITE(HWSTAM, 0xffffffff); |
1590 | 2007 | ||
1591 | I915_WRITE(DEIMR, 0xffffffff); | 2008 | I915_WRITE(DEIMR, 0xffffffff); |
@@ -1597,32 +2014,67 @@ static void ironlake_irq_uninstall(struct drm_device *dev) | |||
1597 | I915_WRITE(GTIIR, I915_READ(GTIIR)); | 2014 | I915_WRITE(GTIIR, I915_READ(GTIIR)); |
1598 | } | 2015 | } |
1599 | 2016 | ||
1600 | void i915_driver_irq_uninstall(struct drm_device * dev) | 2017 | static void i915_driver_irq_uninstall(struct drm_device * dev) |
1601 | { | 2018 | { |
1602 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 2019 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
2020 | int pipe; | ||
1603 | 2021 | ||
1604 | if (!dev_priv) | 2022 | if (!dev_priv) |
1605 | return; | 2023 | return; |
1606 | 2024 | ||
1607 | dev_priv->vblank_pipe = 0; | 2025 | dev_priv->vblank_pipe = 0; |
1608 | 2026 | ||
1609 | if (HAS_PCH_SPLIT(dev)) { | ||
1610 | ironlake_irq_uninstall(dev); | ||
1611 | return; | ||
1612 | } | ||
1613 | |||
1614 | if (I915_HAS_HOTPLUG(dev)) { | 2027 | if (I915_HAS_HOTPLUG(dev)) { |
1615 | I915_WRITE(PORT_HOTPLUG_EN, 0); | 2028 | I915_WRITE(PORT_HOTPLUG_EN, 0); |
1616 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); | 2029 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); |
1617 | } | 2030 | } |
1618 | 2031 | ||
1619 | I915_WRITE(HWSTAM, 0xffffffff); | 2032 | I915_WRITE(HWSTAM, 0xffffffff); |
1620 | I915_WRITE(PIPEASTAT, 0); | 2033 | for_each_pipe(pipe) |
1621 | I915_WRITE(PIPEBSTAT, 0); | 2034 | I915_WRITE(PIPESTAT(pipe), 0); |
1622 | I915_WRITE(IMR, 0xffffffff); | 2035 | I915_WRITE(IMR, 0xffffffff); |
1623 | I915_WRITE(IER, 0x0); | 2036 | I915_WRITE(IER, 0x0); |
1624 | 2037 | ||
1625 | I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff); | 2038 | for_each_pipe(pipe) |
1626 | I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff); | 2039 | I915_WRITE(PIPESTAT(pipe), |
2040 | I915_READ(PIPESTAT(pipe)) & 0x8000ffff); | ||
1627 | I915_WRITE(IIR, I915_READ(IIR)); | 2041 | I915_WRITE(IIR, I915_READ(IIR)); |
1628 | } | 2042 | } |
2043 | |||
2044 | void intel_irq_init(struct drm_device *dev) | ||
2045 | { | ||
2046 | dev->driver->get_vblank_counter = i915_get_vblank_counter; | ||
2047 | dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ | ||
2048 | if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev)) { | ||
2049 | dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ | ||
2050 | dev->driver->get_vblank_counter = gm45_get_vblank_counter; | ||
2051 | } | ||
2052 | |||
2053 | |||
2054 | dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; | ||
2055 | dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; | ||
2056 | |||
2057 | if (IS_IVYBRIDGE(dev)) { | ||
2058 | /* Share pre & uninstall handlers with ILK/SNB */ | ||
2059 | dev->driver->irq_handler = ivybridge_irq_handler; | ||
2060 | dev->driver->irq_preinstall = ironlake_irq_preinstall; | ||
2061 | dev->driver->irq_postinstall = ivybridge_irq_postinstall; | ||
2062 | dev->driver->irq_uninstall = ironlake_irq_uninstall; | ||
2063 | dev->driver->enable_vblank = ivybridge_enable_vblank; | ||
2064 | dev->driver->disable_vblank = ivybridge_disable_vblank; | ||
2065 | } else if (HAS_PCH_SPLIT(dev)) { | ||
2066 | dev->driver->irq_handler = ironlake_irq_handler; | ||
2067 | dev->driver->irq_preinstall = ironlake_irq_preinstall; | ||
2068 | dev->driver->irq_postinstall = ironlake_irq_postinstall; | ||
2069 | dev->driver->irq_uninstall = ironlake_irq_uninstall; | ||
2070 | dev->driver->enable_vblank = ironlake_enable_vblank; | ||
2071 | dev->driver->disable_vblank = ironlake_disable_vblank; | ||
2072 | } else { | ||
2073 | dev->driver->irq_preinstall = i915_driver_irq_preinstall; | ||
2074 | dev->driver->irq_postinstall = i915_driver_irq_postinstall; | ||
2075 | dev->driver->irq_uninstall = i915_driver_irq_uninstall; | ||
2076 | dev->driver->irq_handler = i915_driver_irq_handler; | ||
2077 | dev->driver->enable_vblank = i915_enable_vblank; | ||
2078 | dev->driver->disable_vblank = i915_disable_vblank; | ||
2079 | } | ||
2080 | } | ||
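intel_irq_init() only wires up the drm_driver function table; the DRM core invokes these hooks later from drm_irq_install(), roughly in the order below. This is a simplified model of that call sequence under the classic DRM IRQ path, not the core's exact code:

#include <linux/interrupt.h>

static int install_irq_model(struct drm_device *dev, unsigned int irq)
{
	int ret;

	dev->driver->irq_preinstall(dev);        /* mask everything off */

	ret = request_irq(irq, dev->driver->irq_handler,
			  IRQF_SHARED, "i915", dev);
	if (ret)
		return ret;

	ret = dev->driver->irq_postinstall(dev); /* unmask and enable */
	if (ret)
		free_irq(irq, dev);
	return ret;
}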