author     Daniel Vetter <daniel.vetter@ffwll.ch>   2012-04-26 17:28:05 -0400
committer  Daniel Vetter <daniel.vetter@ffwll.ch>   2012-05-03 05:18:26 -0400
commit     6d90c952cdd20158ec41a5c016c6fad73c9a8749 (patch)
tree       21593af3ce72606e92b1bbb85d4eb453254f0e55 /drivers
parent     64c43c332156261d72e50e929203de116b1129a7 (diff)
drm/i915: remove LP_RING&friends from modeset code
LP refers to 'low priority', as opposed to the high-priority ring on
gen2/3. So let's constrain its use to the code of that era.
Unfortunately we can't yet completely remove the associated macros
from the common headers and move them into i915_dma.c next to the
other dri1 legacy support code; a few cleanups are still missing for
that.
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Acked-by: Jesse Barnes <jbarnes@virtuousgeek.org>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
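
[Editor's note: the conversion pattern is the same in every hunk below: name the render ring explicitly instead of going through the legacy LP_RING() accessor, then use the intel_ring_* helpers directly. A minimal sketch of that pattern, for reference only — emit_flip_old/emit_flip_new are made-up illustrative names, not functions from the patch:]

/* Before: the dri1-era macros pick up LP_RING(dev_priv) implicitly. */
static int emit_flip_old(struct drm_i915_private *dev_priv, u32 flip_addr)
{
	int ret;

	ret = BEGIN_LP_RING(2);		/* reserve 2 dwords on the implicit ring */
	if (ret)
		return ret;
	OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
	OUT_RING(flip_addr);
	ADVANCE_LP_RING();
	return 0;
}

/* After: the ring is an explicit local and is passed to every helper. */
static int emit_flip_new(struct drm_i915_private *dev_priv, u32 flip_addr)
{
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
	int ret;

	ret = intel_ring_begin(ring, 2);	/* reserve 2 dwords on the render ring */
	if (ret)
		return ret;
	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
	intel_ring_emit(ring, flip_addr);
	intel_ring_advance(ring);
	return 0;
}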
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c  78
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c  58
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c       27
3 files changed, 87 insertions(+), 76 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index e1716be47656..278c0f071585 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -5749,16 +5749,17 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	unsigned long offset;
 	u32 flip_mask;
+	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
 	int ret;
 
-	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
 	if (ret)
 		goto err;
 
 	/* Offset into the new buffer for cases of shared fbs between CRTCs */
 	offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
 
-	ret = BEGIN_LP_RING(6);
+	ret = intel_ring_begin(ring, 6);
 	if (ret)
 		goto err_unpin;
 
@@ -5769,14 +5770,14 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
 		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
 	else
 		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-	OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
-	OUT_RING(MI_NOOP);
-	OUT_RING(MI_DISPLAY_FLIP |
-		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-	OUT_RING(fb->pitches[0]);
-	OUT_RING(obj->gtt_offset + offset);
-	OUT_RING(0); /* aux display base address, unused */
-	ADVANCE_LP_RING();
+	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_emit(ring, MI_DISPLAY_FLIP |
+			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+	intel_ring_emit(ring, fb->pitches[0]);
+	intel_ring_emit(ring, obj->gtt_offset + offset);
+	intel_ring_emit(ring, 0); /* aux display base address, unused */
+	intel_ring_advance(ring);
 	return 0;
 
 err_unpin:
@@ -5794,16 +5795,17 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	unsigned long offset;
 	u32 flip_mask;
+	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
 	int ret;
 
-	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
 	if (ret)
 		goto err;
 
 	/* Offset into the new buffer for cases of shared fbs between CRTCs */
 	offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
 
-	ret = BEGIN_LP_RING(6);
+	ret = intel_ring_begin(ring, 6);
 	if (ret)
 		goto err_unpin;
 
@@ -5811,15 +5813,15 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
 		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
 	else
 		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-	OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
-	OUT_RING(MI_NOOP);
-	OUT_RING(MI_DISPLAY_FLIP_I915 |
-		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-	OUT_RING(fb->pitches[0]);
-	OUT_RING(obj->gtt_offset + offset);
-	OUT_RING(MI_NOOP);
+	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
+			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+	intel_ring_emit(ring, fb->pitches[0]);
+	intel_ring_emit(ring, obj->gtt_offset + offset);
+	intel_ring_emit(ring, MI_NOOP);
 
-	ADVANCE_LP_RING();
+	intel_ring_advance(ring);
 	return 0;
 
 err_unpin:
@@ -5836,13 +5838,14 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	uint32_t pf, pipesrc;
+	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
 	int ret;
 
-	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
 	if (ret)
 		goto err;
 
-	ret = BEGIN_LP_RING(4);
+	ret = intel_ring_begin(ring, 4);
 	if (ret)
 		goto err_unpin;
 
@@ -5850,10 +5853,10 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
 	 * Display Registers (which do not change across a page-flip)
 	 * so we need only reprogram the base address.
 	 */
-	OUT_RING(MI_DISPLAY_FLIP |
-		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-	OUT_RING(fb->pitches[0]);
-	OUT_RING(obj->gtt_offset | obj->tiling_mode);
+	intel_ring_emit(ring, MI_DISPLAY_FLIP |
+			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+	intel_ring_emit(ring, fb->pitches[0]);
+	intel_ring_emit(ring, obj->gtt_offset | obj->tiling_mode);
 
 	/* XXX Enabling the panel-fitter across page-flip is so far
 	 * untested on non-native modes, so ignore it for now.
@@ -5861,8 +5864,8 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
 	 */
 	pf = 0;
 	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
-	OUT_RING(pf | pipesrc);
-	ADVANCE_LP_RING();
+	intel_ring_emit(ring, pf | pipesrc);
+	intel_ring_advance(ring);
 	return 0;
 
 err_unpin:
@@ -5878,26 +5881,27 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
 	uint32_t pf, pipesrc;
 	int ret;
 
-	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
 	if (ret)
 		goto err;
 
-	ret = BEGIN_LP_RING(4);
+	ret = intel_ring_begin(ring, 4);
 	if (ret)
 		goto err_unpin;
 
-	OUT_RING(MI_DISPLAY_FLIP |
-		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-	OUT_RING(fb->pitches[0] | obj->tiling_mode);
-	OUT_RING(obj->gtt_offset);
+	intel_ring_emit(ring, MI_DISPLAY_FLIP |
+			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+	intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
+	intel_ring_emit(ring, obj->gtt_offset);
 
 	pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
 	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
-	OUT_RING(pf | pipesrc);
-	ADVANCE_LP_RING();
+	intel_ring_emit(ring, pf | pipesrc);
+	intel_ring_advance(ring);
 	return 0;
 
 err_unpin:
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index e06e46a30757..0bfab0bf60f4 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -215,17 +215,18 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
 {
 	struct drm_device *dev = overlay->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
 	int ret;
 
 	BUG_ON(overlay->last_flip_req);
-	ret = i915_add_request(LP_RING(dev_priv), NULL, request);
+	ret = i915_add_request(ring, NULL, request);
 	if (ret) {
 		kfree(request);
 		return ret;
 	}
 	overlay->last_flip_req = request->seqno;
 	overlay->flip_tail = tail;
-	ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req);
+	ret = i915_wait_request(ring, overlay->last_flip_req);
 	if (ret)
 		return ret;
 	i915_gem_retire_requests(dev);
@@ -287,6 +288,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 {
 	struct drm_device *dev = overlay->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
 	struct drm_i915_gem_request *request;
 	int pipe_a_quirk = 0;
 	int ret;
@@ -306,17 +308,17 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 		goto out;
 	}
 
-	ret = BEGIN_LP_RING(4);
+	ret = intel_ring_begin(ring, 4);
 	if (ret) {
 		kfree(request);
 		goto out;
 	}
 
-	OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON);
-	OUT_RING(overlay->flip_addr | OFC_UPDATE);
-	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
-	OUT_RING(MI_NOOP);
-	ADVANCE_LP_RING();
+	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
+	intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE);
+	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
 	ret = intel_overlay_do_wait_request(overlay, request, NULL);
 out:
@@ -332,6 +334,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 {
 	struct drm_device *dev = overlay->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
 	struct drm_i915_gem_request *request;
 	u32 flip_addr = overlay->flip_addr;
 	u32 tmp;
@@ -351,16 +354,16 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 	if (tmp & (1 << 17))
 		DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
 
-	ret = BEGIN_LP_RING(2);
+	ret = intel_ring_begin(ring, 2);
 	if (ret) {
 		kfree(request);
 		return ret;
 	}
-	OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
-	OUT_RING(flip_addr);
-	ADVANCE_LP_RING();
+	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
+	intel_ring_emit(ring, flip_addr);
+	intel_ring_advance(ring);
 
-	ret = i915_add_request(LP_RING(dev_priv), NULL, request);
+	ret = i915_add_request(ring, NULL, request);
 	if (ret) {
 		kfree(request);
 		return ret;
@@ -401,6 +404,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
 {
 	struct drm_device *dev = overlay->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
 	u32 flip_addr = overlay->flip_addr;
 	struct drm_i915_gem_request *request;
 	int ret;
@@ -417,20 +421,20 @@ static int intel_overlay_off(struct intel_overlay *overlay)
 	 * of the hw. Do it in both cases */
 	flip_addr |= OFC_UPDATE;
 
-	ret = BEGIN_LP_RING(6);
+	ret = intel_ring_begin(ring, 6);
 	if (ret) {
 		kfree(request);
 		return ret;
 	}
 	/* wait for overlay to go idle */
-	OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
-	OUT_RING(flip_addr);
-	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
+	intel_ring_emit(ring, flip_addr);
+	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
 	/* turn overlay off */
-	OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
-	OUT_RING(flip_addr);
-	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
-	ADVANCE_LP_RING();
+	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
+	intel_ring_emit(ring, flip_addr);
+	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+	intel_ring_advance(ring);
 
 	return intel_overlay_do_wait_request(overlay, request,
 					     intel_overlay_off_tail);
@@ -442,12 +446,13 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
 {
 	struct drm_device *dev = overlay->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
 	int ret;
 
 	if (overlay->last_flip_req == 0)
 		return 0;
 
-	ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req);
+	ret = i915_wait_request(ring, overlay->last_flip_req);
 	if (ret)
 		return ret;
 	i915_gem_retire_requests(dev);
@@ -467,6 +472,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 {
 	struct drm_device *dev = overlay->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
 	int ret;
 
 	/* Only wait if there is actually an old frame to release to
@@ -483,15 +489,15 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 	if (request == NULL)
 		return -ENOMEM;
 
-	ret = BEGIN_LP_RING(2);
+	ret = intel_ring_begin(ring, 2);
 	if (ret) {
 		kfree(request);
 		return ret;
 	}
 
-	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
-	OUT_RING(MI_NOOP);
-	ADVANCE_LP_RING();
+	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
 	ret = intel_overlay_do_wait_request(overlay, request,
 					    intel_overlay_release_old_vid_tail);
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index e66330cc0934..0a3699908fdf 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2436,6 +2436,7 @@ static int ironlake_setup_rc6(struct drm_device *dev)
 void ironlake_enable_rc6(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
 	int ret;
 
 	/* rc6 disabled by default due to repeated reports of hanging during
@@ -2455,31 +2456,31 @@ void ironlake_enable_rc6(struct drm_device *dev)
 	 * GPU can automatically power down the render unit if given a page
 	 * to save state.
 	 */
-	ret = BEGIN_LP_RING(6);
+	ret = intel_ring_begin(ring, 6);
 	if (ret) {
 		ironlake_teardown_rc6(dev);
 		mutex_unlock(&dev->struct_mutex);
 		return;
 	}
 
-	OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
-	OUT_RING(MI_SET_CONTEXT);
-	OUT_RING(dev_priv->renderctx->gtt_offset |
-		 MI_MM_SPACE_GTT |
-		 MI_SAVE_EXT_STATE_EN |
-		 MI_RESTORE_EXT_STATE_EN |
-		 MI_RESTORE_INHIBIT);
-	OUT_RING(MI_SUSPEND_FLUSH);
-	OUT_RING(MI_NOOP);
-	OUT_RING(MI_FLUSH);
-	ADVANCE_LP_RING();
+	intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
+	intel_ring_emit(ring, MI_SET_CONTEXT);
+	intel_ring_emit(ring, dev_priv->renderctx->gtt_offset |
+			MI_MM_SPACE_GTT |
+			MI_SAVE_EXT_STATE_EN |
+			MI_RESTORE_EXT_STATE_EN |
+			MI_RESTORE_INHIBIT);
+	intel_ring_emit(ring, MI_SUSPEND_FLUSH);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_emit(ring, MI_FLUSH);
+	intel_ring_advance(ring);
 
 	/*
 	 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
 	 * does an implicit flush, combined with MI_FLUSH above, it should be
 	 * safe to assume that renderctx is valid
 	 */
-	ret = intel_wait_ring_idle(LP_RING(dev_priv));
+	ret = intel_wait_ring_idle(ring);
 	if (ret) {
 		DRM_ERROR("failed to enable ironlake power power savings\n");
 		ironlake_teardown_rc6(dev);