path: root/drivers/gpu/drm
author	Linus Torvalds <torvalds@linux-foundation.org>	2011-06-28 14:15:57 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-06-28 14:15:57 -0400
commit	0d72c6fcb5cd215eeab824fac216ea42c5b574aa (patch)
tree	b2777db7513e14b0341088eea96c869a8071abc4 /drivers/gpu/drm
parent	c89b857ce6d803905b2c9d71bc9effdd286c45ed (diff)
parent	f01c22fd59aa10a3738ede20fd4b9b6fd1e2eac3 (diff)
Merge branch 'drm-intel-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/keithp/linux-2.6
* 'drm-intel-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/keithp/linux-2.6:
  drm/i915: Use chipset-specific irq installers
  drm/i915: forcewake fix after reset
  drm/i915: add Ivy Bridge page flip support
  drm/i915: split page flip queueing into per-chipset functions
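The common thread in these fixes is replacing the big per-generation switch inside intel_crtc_page_flip() with a queue_flip hook selected once at init time, the same vtable pattern the irq fix uses via dev->driver->irq_preinstall. The hunks below show the real implementation; as a rough, self-contained sketch of just the dispatch pattern, the small C program here uses invented placeholder types (fake_device, fake_fb, fake_obj) and a trivial gen2 hook. It is not i915 code, only an illustration of the structure.

#include <errno.h>
#include <stdio.h>

struct fake_device { int gen; };
struct fake_fb { int pitch; };
struct fake_obj { unsigned long gtt_offset; };

struct display_funcs {
	int (*queue_flip)(struct fake_device *dev,
			  struct fake_fb *fb, struct fake_obj *obj);
};

static int gen2_queue_flip(struct fake_device *dev,
			   struct fake_fb *fb, struct fake_obj *obj)
{
	/* The real hook would emit MI_DISPLAY_FLIP commands to the ring. */
	printf("gen2 flip: pitch=%d gtt_offset=%#lx\n",
	       fb->pitch, obj->gtt_offset);
	return 0;
}

static int default_queue_flip(struct fake_device *dev,
			      struct fake_fb *fb, struct fake_obj *obj)
{
	return -ENODEV;	/* generation without a flip implementation */
}

/* Mirrors intel_init_display(): pick the hook once, based on the gen. */
static void init_display(struct fake_device *dev, struct display_funcs *funcs)
{
	funcs->queue_flip = default_queue_flip;
	switch (dev->gen) {
	case 2:
		funcs->queue_flip = gen2_queue_flip;
		break;
	/* gens 3-7 would be wired up here, as in the real patch */
	}
}

int main(void)
{
	struct fake_device dev = { .gen = 2 };
	struct display_funcs funcs;
	struct fake_fb fb = { .pitch = 4096 };
	struct fake_obj obj = { .gtt_offset = 0x10000 };

	init_display(&dev, &funcs);
	/* Mirrors intel_crtc_page_flip(): the generic path just calls the hook. */
	return funcs.queue_flip(&dev, &fb, &obj) ? 1 : 0;
}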
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.c		3
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.h		3
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c		4
-rw-r--r--	drivers/gpu/drm/i915/intel_display.c	305
4 files changed, 230 insertions(+), 85 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 0defd4270594..609358faaa90 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -579,6 +579,9 @@ int i915_reset(struct drm_device *dev, u8 flags)
 	} else switch (INTEL_INFO(dev)->gen) {
 	case 6:
 		ret = gen6_do_reset(dev, flags);
+		/* If reset with a user forcewake, try to restore */
+		if (atomic_read(&dev_priv->forcewake_count))
+			__gen6_gt_force_wake_get(dev_priv);
 		break;
 	case 5:
 		ret = ironlake_do_reset(dev, flags);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f63ee162f124..eddabf68e97a 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -211,6 +211,9 @@ struct drm_i915_display_funcs {
 	void (*fdi_link_train)(struct drm_crtc *crtc);
 	void (*init_clock_gating)(struct drm_device *dev);
 	void (*init_pch_clock_gating)(struct drm_device *dev);
+	int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
+			  struct drm_framebuffer *fb,
+			  struct drm_i915_gem_object *obj);
 	/* clock updates for mode set */
 	/* cursor updates */
 	/* render clock increase/decrease */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 85f713746a1f..5c0d1247f453 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2072,8 +2072,8 @@ i915_wait_request(struct intel_ring_buffer *ring,
 		if (!ier) {
 			DRM_ERROR("something (likely vbetool) disabled "
 				  "interrupts, re-enabling\n");
-			i915_driver_irq_preinstall(ring->dev);
-			i915_driver_irq_postinstall(ring->dev);
+			ring->dev->driver->irq_preinstall(ring->dev);
+			ring->dev->driver->irq_postinstall(ring->dev);
 		}
 
 		trace_i915_gem_request_wait_begin(ring, seqno);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index aa43e7be6053..21b6f93fe919 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -6261,6 +6261,197 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 }
 
+static int intel_gen2_queue_flip(struct drm_device *dev,
+				 struct drm_crtc *crtc,
+				 struct drm_framebuffer *fb,
+				 struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	unsigned long offset;
+	u32 flip_mask;
+	int ret;
+
+	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+	if (ret)
+		goto out;
+
+	/* Offset into the new buffer for cases of shared fbs between CRTCs */
+	offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
+
+	ret = BEGIN_LP_RING(6);
+	if (ret)
+		goto out;
+
+	/* Can't queue multiple flips, so wait for the previous
+	 * one to finish before executing the next.
+	 */
+	if (intel_crtc->plane)
+		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
+	else
+		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
+	OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
+	OUT_RING(MI_NOOP);
+	OUT_RING(MI_DISPLAY_FLIP |
+		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+	OUT_RING(fb->pitch);
+	OUT_RING(obj->gtt_offset + offset);
+	OUT_RING(MI_NOOP);
+	ADVANCE_LP_RING();
+out:
+	return ret;
+}
+
+static int intel_gen3_queue_flip(struct drm_device *dev,
+				 struct drm_crtc *crtc,
+				 struct drm_framebuffer *fb,
+				 struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	unsigned long offset;
+	u32 flip_mask;
+	int ret;
+
+	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+	if (ret)
+		goto out;
+
+	/* Offset into the new buffer for cases of shared fbs between CRTCs */
+	offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
+
+	ret = BEGIN_LP_RING(6);
+	if (ret)
+		goto out;
+
+	if (intel_crtc->plane)
+		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
+	else
+		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
+	OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
+	OUT_RING(MI_NOOP);
+	OUT_RING(MI_DISPLAY_FLIP_I915 |
+		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+	OUT_RING(fb->pitch);
+	OUT_RING(obj->gtt_offset + offset);
+	OUT_RING(MI_NOOP);
+
+	ADVANCE_LP_RING();
+out:
+	return ret;
+}
+
+static int intel_gen4_queue_flip(struct drm_device *dev,
+				 struct drm_crtc *crtc,
+				 struct drm_framebuffer *fb,
+				 struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	uint32_t pf, pipesrc;
+	int ret;
+
+	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+	if (ret)
+		goto out;
+
+	ret = BEGIN_LP_RING(4);
+	if (ret)
+		goto out;
+
+	/* i965+ uses the linear or tiled offsets from the
+	 * Display Registers (which do not change across a page-flip)
+	 * so we need only reprogram the base address.
+	 */
+	OUT_RING(MI_DISPLAY_FLIP |
+		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+	OUT_RING(fb->pitch);
+	OUT_RING(obj->gtt_offset | obj->tiling_mode);
+
+	/* XXX Enabling the panel-fitter across page-flip is so far
+	 * untested on non-native modes, so ignore it for now.
+	 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
+	 */
+	pf = 0;
+	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
+	OUT_RING(pf | pipesrc);
+	ADVANCE_LP_RING();
+out:
+	return ret;
+}
+
+static int intel_gen6_queue_flip(struct drm_device *dev,
+				 struct drm_crtc *crtc,
+				 struct drm_framebuffer *fb,
+				 struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	uint32_t pf, pipesrc;
+	int ret;
+
+	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+	if (ret)
+		goto out;
+
+	ret = BEGIN_LP_RING(4);
+	if (ret)
+		goto out;
+
+	OUT_RING(MI_DISPLAY_FLIP |
+		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+	OUT_RING(fb->pitch | obj->tiling_mode);
+	OUT_RING(obj->gtt_offset);
+
+	pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
+	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
+	OUT_RING(pf | pipesrc);
+	ADVANCE_LP_RING();
+out:
+	return ret;
+}
+
+/*
+ * On gen7 we currently use the blit ring because (in early silicon at least)
+ * the render ring doesn't give us interrupts for page flip completion, which
+ * means clients will hang after the first flip is queued. Fortunately the
+ * blit ring generates interrupts properly, so use it instead.
+ */
+static int intel_gen7_queue_flip(struct drm_device *dev,
+				 struct drm_crtc *crtc,
+				 struct drm_framebuffer *fb,
+				 struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
+	int ret;
+
+	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
+	if (ret)
+		goto out;
+
+	ret = intel_ring_begin(ring, 4);
+	if (ret)
+		goto out;
+
+	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19));
+	intel_ring_emit(ring, (fb->pitch | obj->tiling_mode));
+	intel_ring_emit(ring, (obj->gtt_offset));
+	intel_ring_emit(ring, (MI_NOOP));
+	intel_ring_advance(ring);
+out:
+	return ret;
+}
+
+static int intel_default_queue_flip(struct drm_device *dev,
+				    struct drm_crtc *crtc,
+				    struct drm_framebuffer *fb,
+				    struct drm_i915_gem_object *obj)
+{
+	return -ENODEV;
+}
+
 static int intel_crtc_page_flip(struct drm_crtc *crtc,
 				struct drm_framebuffer *fb,
 				struct drm_pending_vblank_event *event)
@@ -6271,9 +6462,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	struct drm_i915_gem_object *obj;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_unpin_work *work;
-	unsigned long flags, offset;
-	int pipe = intel_crtc->pipe;
-	u32 pf, pipesrc;
+	unsigned long flags;
 	int ret;
 
 	work = kzalloc(sizeof *work, GFP_KERNEL);
@@ -6302,9 +6491,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	obj = intel_fb->obj;
 
 	mutex_lock(&dev->struct_mutex);
-	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
-	if (ret)
-		goto cleanup_work;
 
 	/* Reference the objects for the scheduled work. */
 	drm_gem_object_reference(&work->old_fb_obj->base);
@@ -6316,91 +6502,18 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	if (ret)
 		goto cleanup_objs;
 
-	if (IS_GEN3(dev) || IS_GEN2(dev)) {
-		u32 flip_mask;
-
-		/* Can't queue multiple flips, so wait for the previous
-		 * one to finish before executing the next.
-		 */
-		ret = BEGIN_LP_RING(2);
-		if (ret)
-			goto cleanup_objs;
-
-		if (intel_crtc->plane)
-			flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
-		else
-			flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-		OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
-		OUT_RING(MI_NOOP);
-		ADVANCE_LP_RING();
-	}
-
 	work->pending_flip_obj = obj;
 
 	work->enable_stall_check = true;
 
-	/* Offset into the new buffer for cases of shared fbs between CRTCs */
-	offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
-
-	ret = BEGIN_LP_RING(4);
-	if (ret)
-		goto cleanup_objs;
-
 	/* Block clients from rendering to the new back buffer until
 	 * the flip occurs and the object is no longer visible.
 	 */
 	atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
 
-	switch (INTEL_INFO(dev)->gen) {
-	case 2:
-		OUT_RING(MI_DISPLAY_FLIP |
-			 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-		OUT_RING(fb->pitch);
-		OUT_RING(obj->gtt_offset + offset);
-		OUT_RING(MI_NOOP);
-		break;
-
-	case 3:
-		OUT_RING(MI_DISPLAY_FLIP_I915 |
-			 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-		OUT_RING(fb->pitch);
-		OUT_RING(obj->gtt_offset + offset);
-		OUT_RING(MI_NOOP);
-		break;
-
-	case 4:
-	case 5:
-		/* i965+ uses the linear or tiled offsets from the
-		 * Display Registers (which do not change across a page-flip)
-		 * so we need only reprogram the base address.
-		 */
-		OUT_RING(MI_DISPLAY_FLIP |
-			 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-		OUT_RING(fb->pitch);
-		OUT_RING(obj->gtt_offset | obj->tiling_mode);
-
-		/* XXX Enabling the panel-fitter across page-flip is so far
-		 * untested on non-native modes, so ignore it for now.
-		 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
-		 */
-		pf = 0;
-		pipesrc = I915_READ(PIPESRC(pipe)) & 0x0fff0fff;
-		OUT_RING(pf | pipesrc);
-		break;
-
-	case 6:
-	case 7:
-		OUT_RING(MI_DISPLAY_FLIP |
-			 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-		OUT_RING(fb->pitch | obj->tiling_mode);
-		OUT_RING(obj->gtt_offset);
-
-		pf = I915_READ(PF_CTL(pipe)) & PF_ENABLE;
-		pipesrc = I915_READ(PIPESRC(pipe)) & 0x0fff0fff;
-		OUT_RING(pf | pipesrc);
-		break;
-	}
-	ADVANCE_LP_RING();
+	ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
+	if (ret)
+		goto cleanup_pending;
 
 	mutex_unlock(&dev->struct_mutex);
 
@@ -6408,10 +6521,11 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 
 	return 0;
 
+cleanup_pending:
+	atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
 cleanup_objs:
 	drm_gem_object_unreference(&work->old_fb_obj->base);
 	drm_gem_object_unreference(&obj->base);
-cleanup_work:
 	mutex_unlock(&dev->struct_mutex);
 
 	spin_lock_irqsave(&dev->event_lock, flags);
@@ -7656,6 +7770,31 @@ static void intel_init_display(struct drm_device *dev)
 	else
 		dev_priv->display.get_fifo_size = i830_get_fifo_size;
 	}
+
+	/* Default just returns -ENODEV to indicate unsupported */
+	dev_priv->display.queue_flip = intel_default_queue_flip;
+
+	switch (INTEL_INFO(dev)->gen) {
+	case 2:
+		dev_priv->display.queue_flip = intel_gen2_queue_flip;
+		break;
+
+	case 3:
+		dev_priv->display.queue_flip = intel_gen3_queue_flip;
+		break;
+
+	case 4:
+	case 5:
+		dev_priv->display.queue_flip = intel_gen4_queue_flip;
+		break;
+
+	case 6:
+		dev_priv->display.queue_flip = intel_gen6_queue_flip;
+		break;
+	case 7:
+		dev_priv->display.queue_flip = intel_gen7_queue_flip;
+		break;
+	}
 }
 
 /*