author     Keith Packard <keithp@keithp.com>   2011-06-26 22:12:00 -0400
committer  Keith Packard <keithp@keithp.com>   2011-06-26 22:12:00 -0400
commit     8bc47de33569a111092a48465eb26cd855117e27
tree       458987145929e8bedc09c17a1122b6db7f14dcb2   /drivers/gpu/drm/i915
parent     93dbb29b471c80e09df8bfde9c661df074ec82cc
parent     7c9017e5b77118439952fe8dc22809bae4fae4b6
Merge branch 'drm-intel-fixes' into drm-intel-next
Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--   drivers/gpu/drm/i915/i915_drv.h          3
-rw-r--r--   drivers/gpu/drm/i915/intel_display.c   305
2 files changed, 225 insertions, 83 deletions
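
For readers skimming the diff below: the drm-intel-next side of this merge replaces the big per-generation switch inside intel_crtc_page_flip() with a queue_flip hook on drm_i915_display_funcs, chosen once in intel_init_display(). The following is a minimal, self-contained sketch of that dispatch pattern only; the types and names fake_dev, fake_fb, pick_queue_flip and so on are simplified stand-ins invented for illustration, not the kernel's own structures or functions.

/* Standalone illustration of the queue_flip dispatch pattern described above. */
#include <errno.h>
#include <stdio.h>

struct fake_dev { int gen; };            /* hypothetical stand-in for the device/chipset info */
struct fake_fb  { unsigned int pitch; }; /* hypothetical stand-in for a framebuffer */

/* One hook per hardware generation; the real kernel hook also takes the crtc and GEM object. */
typedef int (*queue_flip_fn)(struct fake_dev *dev, struct fake_fb *fb);

static int default_queue_flip(struct fake_dev *dev, struct fake_fb *fb)
{
        (void)dev; (void)fb;
        return -ENODEV;                  /* page flipping unsupported on unknown hardware */
}

static int gen4_queue_flip(struct fake_dev *dev, struct fake_fb *fb)
{
        (void)dev;
        /* A real implementation would emit MI_DISPLAY_FLIP commands into the ring here. */
        printf("queued gen4-style flip, pitch=%u\n", fb->pitch);
        return 0;
}

/* Chosen once at init time, mirroring the switch added to intel_init_display(). */
static queue_flip_fn pick_queue_flip(int gen)
{
        switch (gen) {
        case 4:
        case 5:
                return gen4_queue_flip;
        default:
                return default_queue_flip;
        }
}

int main(void)
{
        struct fake_dev dev = { .gen = 4 };
        struct fake_fb fb = { .pitch = 4096 };
        queue_flip_fn queue_flip = pick_queue_flip(dev.gen);

        /* The flip path then simply calls the hook instead of switching on the generation. */
        printf("queue_flip returned %d\n", queue_flip(&dev, &fb));
        return 0;
}

Selecting the hook once at init keeps the generation check out of the flip path itself and lets each generation's ring emission live in its own function, which is what the diff below does for gen2 through gen7.
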
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 8a9fd9177860..01affb63be29 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -211,6 +211,9 @@ struct drm_i915_display_funcs {
 	void (*fdi_link_train)(struct drm_crtc *crtc);
 	void (*init_clock_gating)(struct drm_device *dev);
 	void (*init_pch_clock_gating)(struct drm_device *dev);
+	int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
+			  struct drm_framebuffer *fb,
+			  struct drm_i915_gem_object *obj);
 	/* clock updates for mode set */
 	/* cursor updates */
 	/* render clock increase/decrease */
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 86a3ec1469ba..e58627f580c6 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -6251,6 +6251,197 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 }
 
+static int intel_gen2_queue_flip(struct drm_device *dev,
+				 struct drm_crtc *crtc,
+				 struct drm_framebuffer *fb,
+				 struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	unsigned long offset;
+	u32 flip_mask;
+	int ret;
+
+	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+	if (ret)
+		goto out;
+
+	/* Offset into the new buffer for cases of shared fbs between CRTCs */
+	offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
+
+	ret = BEGIN_LP_RING(6);
+	if (ret)
+		goto out;
+
+	/* Can't queue multiple flips, so wait for the previous
+	 * one to finish before executing the next.
+	 */
+	if (intel_crtc->plane)
+		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
+	else
+		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
+	OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
+	OUT_RING(MI_NOOP);
+	OUT_RING(MI_DISPLAY_FLIP |
+		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+	OUT_RING(fb->pitch);
+	OUT_RING(obj->gtt_offset + offset);
+	OUT_RING(MI_NOOP);
+	ADVANCE_LP_RING();
+out:
+	return ret;
+}
+
+static int intel_gen3_queue_flip(struct drm_device *dev,
+				 struct drm_crtc *crtc,
+				 struct drm_framebuffer *fb,
+				 struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	unsigned long offset;
+	u32 flip_mask;
+	int ret;
+
+	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+	if (ret)
+		goto out;
+
+	/* Offset into the new buffer for cases of shared fbs between CRTCs */
+	offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
+
+	ret = BEGIN_LP_RING(6);
+	if (ret)
+		goto out;
+
+	if (intel_crtc->plane)
+		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
+	else
+		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
+	OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
+	OUT_RING(MI_NOOP);
+	OUT_RING(MI_DISPLAY_FLIP_I915 |
+		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+	OUT_RING(fb->pitch);
+	OUT_RING(obj->gtt_offset + offset);
+	OUT_RING(MI_NOOP);
+
+	ADVANCE_LP_RING();
+out:
+	return ret;
+}
+
+static int intel_gen4_queue_flip(struct drm_device *dev,
+				 struct drm_crtc *crtc,
+				 struct drm_framebuffer *fb,
+				 struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	uint32_t pf, pipesrc;
+	int ret;
+
+	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+	if (ret)
+		goto out;
+
+	ret = BEGIN_LP_RING(4);
+	if (ret)
+		goto out;
+
+	/* i965+ uses the linear or tiled offsets from the
+	 * Display Registers (which do not change across a page-flip)
+	 * so we need only reprogram the base address.
+	 */
+	OUT_RING(MI_DISPLAY_FLIP |
+		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+	OUT_RING(fb->pitch);
+	OUT_RING(obj->gtt_offset | obj->tiling_mode);
+
+	/* XXX Enabling the panel-fitter across page-flip is so far
+	 * untested on non-native modes, so ignore it for now.
+	 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
+	 */
+	pf = 0;
+	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
+	OUT_RING(pf | pipesrc);
+	ADVANCE_LP_RING();
+out:
+	return ret;
+}
+
+static int intel_gen6_queue_flip(struct drm_device *dev,
+				 struct drm_crtc *crtc,
+				 struct drm_framebuffer *fb,
+				 struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	uint32_t pf, pipesrc;
+	int ret;
+
+	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+	if (ret)
+		goto out;
+
+	ret = BEGIN_LP_RING(4);
+	if (ret)
+		goto out;
+
+	OUT_RING(MI_DISPLAY_FLIP |
+		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+	OUT_RING(fb->pitch | obj->tiling_mode);
+	OUT_RING(obj->gtt_offset);
+
+	pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
+	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
+	OUT_RING(pf | pipesrc);
+	ADVANCE_LP_RING();
+out:
+	return ret;
+}
+
+/*
+ * On gen7 we currently use the blit ring because (in early silicon at least)
+ * the render ring doesn't give us interrupts for page flip completion, which
+ * means clients will hang after the first flip is queued. Fortunately the
+ * blit ring generates interrupts properly, so use it instead.
+ */
+static int intel_gen7_queue_flip(struct drm_device *dev,
+				 struct drm_crtc *crtc,
+				 struct drm_framebuffer *fb,
+				 struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
+	int ret;
+
+	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
+	if (ret)
+		goto out;
+
+	ret = intel_ring_begin(ring, 4);
+	if (ret)
+		goto out;
+
+	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19));
+	intel_ring_emit(ring, (fb->pitch | obj->tiling_mode));
+	intel_ring_emit(ring, (obj->gtt_offset));
+	intel_ring_emit(ring, (MI_NOOP));
+	intel_ring_advance(ring);
+out:
+	return ret;
+}
+
+static int intel_default_queue_flip(struct drm_device *dev,
+				    struct drm_crtc *crtc,
+				    struct drm_framebuffer *fb,
+				    struct drm_i915_gem_object *obj)
+{
+	return -ENODEV;
+}
+
 static int intel_crtc_page_flip(struct drm_crtc *crtc,
 				struct drm_framebuffer *fb,
 				struct drm_pending_vblank_event *event)
@@ -6261,9 +6452,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	struct drm_i915_gem_object *obj;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_unpin_work *work;
-	unsigned long flags, offset;
-	int pipe = intel_crtc->pipe;
-	u32 pf, pipesrc;
+	unsigned long flags;
 	int ret;
 
 	work = kzalloc(sizeof *work, GFP_KERNEL);
@@ -6292,9 +6481,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	obj = intel_fb->obj;
 
 	mutex_lock(&dev->struct_mutex);
-	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
-	if (ret)
-		goto cleanup_work;
 
 	/* Reference the objects for the scheduled work. */
 	drm_gem_object_reference(&work->old_fb_obj->base);
@@ -6306,91 +6492,18 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	if (ret)
 		goto cleanup_objs;
 
-	if (IS_GEN3(dev) || IS_GEN2(dev)) {
-		u32 flip_mask;
-
-		/* Can't queue multiple flips, so wait for the previous
-		 * one to finish before executing the next.
-		 */
-		ret = BEGIN_LP_RING(2);
-		if (ret)
-			goto cleanup_objs;
-
-		if (intel_crtc->plane)
-			flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
-		else
-			flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-		OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
-		OUT_RING(MI_NOOP);
-		ADVANCE_LP_RING();
-	}
-
 	work->pending_flip_obj = obj;
 
 	work->enable_stall_check = true;
 
-	/* Offset into the new buffer for cases of shared fbs between CRTCs */
-	offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
-
-	ret = BEGIN_LP_RING(4);
-	if (ret)
-		goto cleanup_objs;
-
 	/* Block clients from rendering to the new back buffer until
 	 * the flip occurs and the object is no longer visible.
 	 */
 	atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
 
-	switch (INTEL_INFO(dev)->gen) {
-	case 2:
-		OUT_RING(MI_DISPLAY_FLIP |
-			 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-		OUT_RING(fb->pitch);
-		OUT_RING(obj->gtt_offset + offset);
-		OUT_RING(MI_NOOP);
-		break;
-
-	case 3:
-		OUT_RING(MI_DISPLAY_FLIP_I915 |
-			 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-		OUT_RING(fb->pitch);
-		OUT_RING(obj->gtt_offset + offset);
-		OUT_RING(MI_NOOP);
-		break;
-
-	case 4:
-	case 5:
-		/* i965+ uses the linear or tiled offsets from the
-		 * Display Registers (which do not change across a page-flip)
-		 * so we need only reprogram the base address.
-		 */
-		OUT_RING(MI_DISPLAY_FLIP |
-			 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-		OUT_RING(fb->pitch);
-		OUT_RING(obj->gtt_offset | obj->tiling_mode);
-
-		/* XXX Enabling the panel-fitter across page-flip is so far
-		 * untested on non-native modes, so ignore it for now.
-		 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
-		 */
-		pf = 0;
-		pipesrc = I915_READ(PIPESRC(pipe)) & 0x0fff0fff;
-		OUT_RING(pf | pipesrc);
-		break;
-
-	case 6:
-	case 7:
-		OUT_RING(MI_DISPLAY_FLIP |
-			 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-		OUT_RING(fb->pitch | obj->tiling_mode);
-		OUT_RING(obj->gtt_offset);
-
-		pf = I915_READ(PF_CTL(pipe)) & PF_ENABLE;
-		pipesrc = I915_READ(PIPESRC(pipe)) & 0x0fff0fff;
-		OUT_RING(pf | pipesrc);
-		break;
-	}
-	ADVANCE_LP_RING();
+	ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
+	if (ret)
+		goto cleanup_pending;
 
 	mutex_unlock(&dev->struct_mutex);
 
@@ -6398,10 +6511,11 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 
 	return 0;
 
+cleanup_pending:
+	atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
 cleanup_objs:
 	drm_gem_object_unreference(&work->old_fb_obj->base);
 	drm_gem_object_unreference(&obj->base);
-cleanup_work:
 	mutex_unlock(&dev->struct_mutex);
 
 	spin_lock_irqsave(&dev->event_lock, flags);
@@ -7646,6 +7760,31 @@ static void intel_init_display(struct drm_device *dev)
 		else
 			dev_priv->display.get_fifo_size = i830_get_fifo_size;
 	}
+
+	/* Default just returns -ENODEV to indicate unsupported */
+	dev_priv->display.queue_flip = intel_default_queue_flip;
+
+	switch (INTEL_INFO(dev)->gen) {
+	case 2:
+		dev_priv->display.queue_flip = intel_gen2_queue_flip;
+		break;
+
+	case 3:
+		dev_priv->display.queue_flip = intel_gen3_queue_flip;
+		break;
+
+	case 4:
+	case 5:
+		dev_priv->display.queue_flip = intel_gen4_queue_flip;
+		break;
+
+	case 6:
+		dev_priv->display.queue_flip = intel_gen6_queue_flip;
+		break;
+	case 7:
+		dev_priv->display.queue_flip = intel_gen7_queue_flip;
+		break;
+	}
 }
 
 /*