author     Chris Wilson <chris@chris-wilson.co.uk>   2010-10-24 07:38:05 -0400
committer  Chris Wilson <chris@chris-wilson.co.uk>   2010-10-24 15:22:51 -0400
commit     641934069d29211baf82afb93622a426172b67b6
tree       d00f3e69c1c02ddc96a43f744644e263a8e77a9d /drivers/gpu/drm/i915/i915_gem.c
parent     b6651458d33c309767762a6c3da041573413fd88
drm/i915: Move gpu_write_list to per-ring
... to prevent flush processing of an idle (or even absent) ring.
This fixes a regression during suspend from 87acb0a5.
Reported-and-tested-by: Alexey Fisher <bug-track@fisher-privat.net>
Tested-by: Peter Clifton <pcjc2@cam.ac.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
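
As a rough illustration of the idea (a minimal userspace sketch with assumed, simplified names such as struct ring, ring_init and ring_idle; not the actual i915 code): once each ring owns its own gpu_write_list, idling a ring whose write list is empty can return early instead of issuing a flush for a ring that is idle or not even present.

    /* Hypothetical sketch of a per-ring write list; names are illustrative only. */
    #include <stdio.h>

    struct node { struct node *next; };            /* simplified list node */

    struct ring {
            const char *name;
            struct node gpu_write_list;            /* sentinel head: empty when it points to itself */
    };

    static void ring_init(struct ring *r, const char *name)
    {
            r->name = name;
            r->gpu_write_list.next = &r->gpu_write_list;   /* start with an empty list */
    }

    static int write_list_empty(const struct ring *r)
    {
            return r->gpu_write_list.next == &r->gpu_write_list;
    }

    /* Mirrors the early return added to i915_ring_idle() in the patch below:
     * a ring with nothing on its write list needs no flush and no wait. */
    static int ring_idle(struct ring *r)
    {
            if (write_list_empty(r)) {
                    printf("%s: write list empty, nothing to flush\n", r->name);
                    return 0;
            }
            printf("%s: flush and wait for outstanding writes\n", r->name);
            return 0;
    }

    int main(void)
    {
            struct ring render, blt;
            ring_init(&render, "render");
            ring_init(&blt, "blt");
            ring_idle(&render);
            ring_idle(&blt);        /* an idle or absent ring is now a cheap no-op */
            return 0;
    }

In the patch itself, the early return is the list_empty(&ring->gpu_write_list) check added to i915_ring_idle(), and the new init_ring_lists() helper initialises the per-ring gpu_write_list alongside the existing active and request lists.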
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 36
1 file changed, 19 insertions(+), 17 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index e7f27a5b89d..6c2618d884e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1657,12 +1657,11 @@ i915_gem_process_flushing_list(struct drm_device *dev,
 	struct drm_i915_gem_object *obj_priv, *next;
 
 	list_for_each_entry_safe(obj_priv, next,
-				 &dev_priv->mm.gpu_write_list,
+				 &ring->gpu_write_list,
 				 gpu_write_list) {
 		struct drm_gem_object *obj = &obj_priv->base;
 
-		if (obj->write_domain & flush_domains &&
-		    obj_priv->ring == ring) {
+		if (obj->write_domain & flush_domains) {
 			uint32_t old_write_domain = obj->write_domain;
 
 			obj->write_domain = 0;
@@ -2173,6 +2172,9 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 static int i915_ring_idle(struct drm_device *dev,
 			  struct intel_ring_buffer *ring)
 {
+	if (list_empty(&ring->gpu_write_list))
+		return 0;
+
 	i915_gem_flush_ring(dev, NULL, ring,
 			    I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
 	return i915_wait_request(dev,
@@ -3786,14 +3788,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 
 	for (i = 0; i < args->buffer_count; i++) {
 		struct drm_gem_object *obj = object_list[i];
-		struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 		uint32_t old_write_domain = obj->write_domain;
-
 		obj->write_domain = obj->pending_write_domain;
-		if (obj->write_domain)
-			list_move_tail(&obj_priv->gpu_write_list,
-				       &dev_priv->mm.gpu_write_list);
-
 		trace_i915_gem_object_change_domain(obj,
 						    obj->read_domains,
 						    old_write_domain);
@@ -3858,9 +3854,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 
 	for (i = 0; i < args->buffer_count; i++) {
 		struct drm_gem_object *obj = object_list[i];
-		obj_priv = to_intel_bo(obj);
 
 		i915_gem_object_move_to_active(obj, ring);
+		if (obj->write_domain)
+			list_move_tail(&to_intel_bo(obj)->gpu_write_list,
+				       &ring->gpu_write_list);
 	}
 
 	i915_add_request(dev, file, request, ring);
@@ -4618,6 +4616,14 @@ i915_gem_lastclose(struct drm_device *dev)
 		DRM_ERROR("failed to idle hardware: %d\n", ret);
 }
 
+static void
+init_ring_lists(struct intel_ring_buffer *ring)
+{
+	INIT_LIST_HEAD(&ring->active_list);
+	INIT_LIST_HEAD(&ring->request_list);
+	INIT_LIST_HEAD(&ring->gpu_write_list);
+}
+
 void
 i915_gem_load(struct drm_device *dev)
 {
@@ -4626,17 +4632,13 @@ i915_gem_load(struct drm_device *dev)
 
 	INIT_LIST_HEAD(&dev_priv->mm.active_list);
 	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
-	INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
 	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
 	INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
 	INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
-	INIT_LIST_HEAD(&dev_priv->render_ring.active_list);
-	INIT_LIST_HEAD(&dev_priv->render_ring.request_list);
-	INIT_LIST_HEAD(&dev_priv->bsd_ring.active_list);
-	INIT_LIST_HEAD(&dev_priv->bsd_ring.request_list);
-	INIT_LIST_HEAD(&dev_priv->blt_ring.active_list);
-	INIT_LIST_HEAD(&dev_priv->blt_ring.request_list);
+	init_ring_lists(&dev_priv->render_ring);
+	init_ring_lists(&dev_priv->bsd_ring);
+	init_ring_lists(&dev_priv->blt_ring);
 	for (i = 0; i < 16; i++)
 		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
 	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,