author    Dave Airlie <airlied@redhat.com>  2010-02-24 22:39:36 -0500
committer Dave Airlie <airlied@redhat.com>  2010-02-24 22:39:36 -0500
commit    30d6c72c4a760cfc9069ee100786e4d6cf6de59d
tree      e2f1b3bad0a161a1e94705cb14d2b8c2760caf91
parent    de19322d554fd0f449d17610823c38494c06db9e
parent    9df30794f609d9412f14cfd0eb7b45dd64d0b14e
Merge remote branch 'anholt/drm-intel-next' into drm-next-stage

* anholt/drm-intel-next:
  drm/i915: Record batch buffer following GPU error
  drm/i915: give up on 8xx lid status
  drm/i915: reduce some of the duplication of tiling checking
  drm/i915: blow away userspace mappings before fence change
  drm/i915: move a gtt flush to the correct place
  agp/intel: official names for Pineview and Ironlake
  drm/i915: overlay: drop superflous gpu flushes
  drm/i915: overlay: nuke readback to flush wc caches
  drm/i915: provide self-refresh status in debugfs
  drm/i915: provide FBC status in debugfs
  drm/i915: fix drps disable so unload & re-load works
  drm/i915: Fix OGLC performance regression on 945
  drm/i915: Deobfuscate the render p-state obfuscation
  drm/i915: add dynamic performance control support for Ironlake
  drm/i915: enable memory self refresh on 9xx
  drm/i915: Don't reserve compatibility fence regs in KMS mode.
  drm/i915: Keep MCHBAR always enabled
  drm/i915: Replace open-coded eviction in i915_gem_idle()
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
 drivers/gpu/drm/i915/i915_gem.c | 158
 1 file changed, 55 insertions(+), 103 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 9d87d5a41bd..b5df30ca0fa 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2540,6 +2540,12 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
 	if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
 		return 0;
 
+	/* If we've changed tiling, GTT-mappings of the object
+	 * need to re-fault to ensure that the correct fence register
+	 * setup is in place.
+	 */
+	i915_gem_release_mmap(obj);
+
 	/* On the i915, GPU access to tiled buffers is via a fence,
 	 * therefore we must wait for any outstanding access to complete
 	 * before clearing the fence.
@@ -2548,12 +2554,12 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
 		int ret;
 
 		i915_gem_object_flush_gpu_write_domain(obj);
-		i915_gem_object_flush_gtt_write_domain(obj);
 		ret = i915_gem_object_wait_rendering(obj);
 		if (ret != 0)
 			return ret;
 	}
 
+	i915_gem_object_flush_gtt_write_domain(obj);
 	i915_gem_clear_fence_reg (obj);
 
 	return 0;
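These two hunks tighten the ordering in i915_gem_object_put_fence_reg(): userspace GTT mappings are torn down first, so the next CPU access re-faults under the new fence setup, and the GTT write-domain flush now happens unconditionally before the fence is cleared rather than only when the object still had GPU rendering outstanding. A minimal userspace model of the corrected ordering; every function here is an illustrative stub standing in for the i915 helper of the same role, not the driver's API:

#include <stdbool.h>
#include <stdio.h>

static bool gpu_busy = false;	/* object still has rendering in flight? */

static void release_mmap(void)     { puts("invalidate GTT mmaps"); }
static void flush_gpu_writes(void) { puts("flush GPU write domain"); }
static int  wait_rendering(void)   { puts("wait for rendering"); return 0; }
static void flush_gtt_writes(void) { puts("flush GTT write domain"); }
static void clear_fence(void)      { puts("clear fence register"); }

static int put_fence_reg(void)
{
	/* Mappings bake in the current fence setup, so kill them before
	 * touching anything else (first hunk). */
	release_mmap();

	if (gpu_busy) {
		flush_gpu_writes();
		if (wait_rendering() != 0)
			return -1;
	}

	/* Unconditional now; previously this was skipped whenever the
	 * object had no outstanding rendering (second hunk). */
	flush_gtt_writes();
	clear_fence();
	return 0;
}

int main(void)
{
	return put_fence_reg();
}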
@@ -3243,7 +3249,8 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 		obj_priv->tiling_mode != I915_TILING_NONE;
 
 	/* Check fence reg constraints and rebind if necessary */
-	if (need_fence && !i915_obj_fenceable(dev, obj))
+	if (need_fence && !i915_gem_object_fence_offset_ok(obj,
+						obj_priv->tiling_mode))
 		i915_gem_object_unbind(obj);
 
 	/* Choose the GTT offset for our buffer and put it there. */
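The rebind test now asks whether the object's current offset is usable for a fence at all. As background, a fence on these chipsets covers a power-of-two sized region that must start on a boundary of its own size, which is the kind of constraint a check like i915_gem_object_fence_offset_ok() enforces. The helper below is a hypothetical sketch of such an alignment check under that assumption, not the driver's implementation (which also considers chipset generation and tiling mode):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical: a fenced region must be power-of-two sized and its
 * offset aligned to that size. */
static bool fence_offset_ok(uint32_t offset, uint32_t size)
{
	if (size == 0 || (size & (size - 1)) != 0)
		return false;              /* size must be a power of two */
	return (offset & (size - 1)) == 0; /* offset aligned to the size */
}

int main(void)
{
	printf("%d\n", fence_offset_ok(0x100000, 0x100000)); /* 1: aligned    */
	printf("%d\n", fence_offset_ok(0x180000, 0x100000)); /* 0: misaligned */
	return 0;
}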
@@ -4437,129 +4444,73 @@ i915_gem_evict_from_inactive_list(struct drm_device *dev)
 	return 0;
 }
 
-int
-i915_gem_idle(struct drm_device *dev)
+static int
+i915_gpu_idle(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	uint32_t seqno, cur_seqno, last_seqno;
-	int stuck, ret;
+	bool lists_empty;
+	uint32_t seqno;
 
-	mutex_lock(&dev->struct_mutex);
+	spin_lock(&dev_priv->mm.active_list_lock);
+	lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
+		      list_empty(&dev_priv->mm.active_list);
+	spin_unlock(&dev_priv->mm.active_list_lock);
 
-	if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) {
-		mutex_unlock(&dev->struct_mutex);
+	if (lists_empty)
 		return 0;
-	}
-
-	/* Hack! Don't let anybody do execbuf while we don't control the chip.
-	 * We need to replace this with a semaphore, or something.
-	 */
-	dev_priv->mm.suspended = 1;
-	del_timer(&dev_priv->hangcheck_timer);
-
-	/* Cancel the retire work handler, wait for it to finish if running
-	 */
-	mutex_unlock(&dev->struct_mutex);
-	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
-	mutex_lock(&dev->struct_mutex);
-
-	i915_kernel_lost_context(dev);
 
-	/* Flush the GPU along with all non-CPU write domains
-	 */
+	/* Flush everything onto the inactive list. */
 	i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
 	seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
-
-	if (seqno == 0) {
-		mutex_unlock(&dev->struct_mutex);
+	if (seqno == 0)
 		return -ENOMEM;
-	}
-
-	dev_priv->mm.waiting_gem_seqno = seqno;
-	last_seqno = 0;
-	stuck = 0;
-	for (;;) {
-		cur_seqno = i915_get_gem_seqno(dev);
-		if (i915_seqno_passed(cur_seqno, seqno))
-			break;
-		if (last_seqno == cur_seqno) {
-			if (stuck++ > 100) {
-				DRM_ERROR("hardware wedged\n");
-				atomic_set(&dev_priv->mm.wedged, 1);
-				DRM_WAKEUP(&dev_priv->irq_queue);
-				break;
-			}
-		}
-		msleep(10);
-		last_seqno = cur_seqno;
-	}
-	dev_priv->mm.waiting_gem_seqno = 0;
-
-	i915_gem_retire_requests(dev);
-
-	spin_lock(&dev_priv->mm.active_list_lock);
-	if (!atomic_read(&dev_priv->mm.wedged)) {
-		/* Active and flushing should now be empty as we've
-		 * waited for a sequence higher than any pending execbuffer
-		 */
-		WARN_ON(!list_empty(&dev_priv->mm.active_list));
-		WARN_ON(!list_empty(&dev_priv->mm.flushing_list));
-		/* Request should now be empty as we've also waited
-		 * for the last request in the list
-		 */
-		WARN_ON(!list_empty(&dev_priv->mm.request_list));
-	}
-
-	/* Empty the active and flushing lists to inactive. If there's
-	 * anything left at this point, it means that we're wedged and
-	 * nothing good's going to happen by leaving them there. So strip
-	 * the GPU domains and just stuff them onto inactive.
-	 */
-	while (!list_empty(&dev_priv->mm.active_list)) {
-		struct drm_gem_object *obj;
-		uint32_t old_write_domain;
 
-		obj = list_first_entry(&dev_priv->mm.active_list,
-				       struct drm_i915_gem_object,
-				       list)->obj;
-		old_write_domain = obj->write_domain;
-		obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
-		i915_gem_object_move_to_inactive(obj);
+	return i915_wait_request(dev, seqno);
+}
 
-		trace_i915_gem_object_change_domain(obj,
-						    obj->read_domains,
-						    old_write_domain);
-	}
-	spin_unlock(&dev_priv->mm.active_list_lock);
+int
+i915_gem_idle(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int ret;
 
-	while (!list_empty(&dev_priv->mm.flushing_list)) {
-		struct drm_gem_object *obj;
-		uint32_t old_write_domain;
+	mutex_lock(&dev->struct_mutex);
 
-		obj = list_first_entry(&dev_priv->mm.flushing_list,
-				       struct drm_i915_gem_object,
-				       list)->obj;
-		old_write_domain = obj->write_domain;
-		obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
-		i915_gem_object_move_to_inactive(obj);
+	if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) {
+		mutex_unlock(&dev->struct_mutex);
+		return 0;
+	}
 
-		trace_i915_gem_object_change_domain(obj,
-						    obj->read_domains,
-						    old_write_domain);
-	}
-
-
-	/* Move all inactive buffers out of the GTT. */
-	ret = i915_gem_evict_from_inactive_list(dev);
-	WARN_ON(!list_empty(&dev_priv->mm.inactive_list));
+	ret = i915_gpu_idle(dev);
 	if (ret) {
 		mutex_unlock(&dev->struct_mutex);
 		return ret;
 	}
 
+	/* Under UMS, be paranoid and evict. */
+	if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+		ret = i915_gem_evict_from_inactive_list(dev);
+		if (ret) {
+			mutex_unlock(&dev->struct_mutex);
+			return ret;
+		}
+	}
+
+	/* Hack! Don't let anybody do execbuf while we don't control the chip.
+	 * We need to replace this with a semaphore, or something.
+	 * And not confound mm.suspended!
+	 */
+	dev_priv->mm.suspended = 1;
+	del_timer(&dev_priv->hangcheck_timer);
+
+	i915_kernel_lost_context(dev);
 	i915_gem_cleanup_ringbuffer(dev);
+
 	mutex_unlock(&dev->struct_mutex);
 
+	/* Cancel the retire work handler, which should be idle now. */
+	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
+
 	return 0;
 }
 
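This hunk splits the GPU quiesce out of i915_gem_idle() into i915_gpu_idle(): if the active and flushing lists are empty there is nothing to wait for; otherwise flush all GPU write domains, emit a marker request, and sleep until its sequence number is reached, replacing the old hand-rolled msleep() poll loop. A self-contained model of that idiom; the stubs stand in for i915_gem_flush / i915_add_request / i915_wait_request and only the control flow is meant to be faithful:

#include <stdint.h>
#include <stdio.h>

static uint32_t hw_seqno;	/* sequence number the "GPU" has retired */
static uint32_t next_seqno = 1;
static int lists_empty = 0;	/* nothing on the active/flushing lists? */

static void gpu_flush(void)       { /* flush all GPU write domains */ }
static uint32_t add_request(void) { return next_seqno++; }

static int wait_request(uint32_t seqno)
{
	/* i915_seqno_passed()-style wraparound-safe comparison; the real
	 * code sleeps on an interrupt instead of spinning. */
	while ((int32_t)(hw_seqno - seqno) < 0)
		hw_seqno++;
	return 0;
}

static int gpu_idle(void)
{
	uint32_t seqno;

	if (lists_empty)
		return 0;	/* nothing in flight: already idle */

	gpu_flush();
	seqno = add_request();
	if (seqno == 0)
		return -1;	/* -ENOMEM in the driver */
	return wait_request(seqno);
}

int main(void)
{
	printf("gpu_idle() = %d\n", gpu_idle());
	return 0;
}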
@@ -4846,7 +4797,8 @@ i915_gem_load(struct drm_device *dev)
 	spin_unlock(&shrink_list_lock);
 
 	/* Old X drivers will take 0-2 for front, back, depth buffers */
-	dev_priv->fence_reg_start = 3;
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		dev_priv->fence_reg_start = 3;
 
 	if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
 		dev_priv->num_fence_regs = 16;
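The last hunk stops reserving fence registers 0-2 under KMS, since only an old user-mode (UMS) X driver would claim them for its front, back, and depth buffers. The effect on how many fences GEM can hand out is simple arithmetic; a sketch, with num_fence_regs = 16 taken from the i965/i945/G33 branch above:

#include <stdio.h>

int main(void)
{
	const int num_fence_regs = 16;	/* i965/i945/G33 branch above */
	const int ums_start = 3;	/* regs 0-2 left to the old X driver */
	const int kms_start = 0;	/* KMS: the kernel owns all of them */

	printf("UMS: %d usable fence regs\n", num_fence_regs - ums_start); /* 13 */
	printf("KMS: %d usable fence regs\n", num_fence_regs - kms_start); /* 16 */
	return 0;
}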